From 4f62f36af73eaa9717f6ae57537007e14b4a3ed3 Mon Sep 17 00:00:00 2001
From: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
Date: Thu, 7 Jan 2021 08:43:56 +0000
Subject: [PATCH] Update cortex to 1 6 (#3131)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update Cortex to recent master (1.5.0+, 35e698bb56d6).

Signed-off-by: Peter Štibraný

* Ignore set scheduler address.

Signed-off-by: Peter Štibraný

* Make linter happy.

Signed-off-by: Peter Štibraný

* Upgrade cortex to 1.6

Signed-off-by: Michel Hollands

* Work around issues in go-openapi libraries

Signed-off-by: Michel Hollands

* Clean up go.mod and go.sum

Signed-off-by: Michel Hollands

Co-authored-by: Peter Štibraný
---
 go.mod | 24 +-
 go.sum | 109 +-
 pkg/distributor/distributor_test.go | 6 +-
 pkg/ingester/checkpoint.go | 2 +-
 pkg/loki/loki.go | 6 +-
 pkg/loki/modules.go | 57 +-
 pkg/lokifrontend/config.go | 19 +-
 pkg/querier/ingester_querier.go | 6 +-
 pkg/querier/querier_mock_test.go | 6 +-
 pkg/querier/queryrange/roundtrip.go | 17 +-
 pkg/querier/queryrange/roundtrip_test.go | 15 +-
 pkg/util/validation/limits.go | 7 +
 .../Azure/go-autorest/autorest/azure/azure.go | 5 +
 .../asaskevich/govalidator/.travis.yml | 22 +-
 .../asaskevich/govalidator/CODE_OF_CONDUCT.md | 43 +
 .../github.com/asaskevich/govalidator/LICENSE | 2 +-
 .../asaskevich/govalidator/README.md | 42 +-
 .../asaskevich/govalidator/arrays.go | 29 +
 .../asaskevich/govalidator/converter.go | 33 +-
 .../asaskevich/govalidator/error.go | 6 +-
 .../github.com/asaskevich/govalidator/go.mod | 2 +-
 .../asaskevich/govalidator/numerics.go | 37 +-
 .../asaskevich/govalidator/patterns.go | 6 +-
 .../asaskevich/govalidator/types.go | 25 +-
 .../asaskevich/govalidator/validator.go | 309 +-
 .../aws-sdk-go/aws/credentials/credentials.go | 74 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 325 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/internal/ini/ini_parser.go | 7 +-
 .../aws/aws-sdk-go/service/dynamodb/api.go | 1353 ++-
 .../dynamodb/dynamodbiface/interface.go | 15 +
 .../aws/aws-sdk-go/service/dynamodb/errors.go | 21 +
 .../aws/aws-sdk-go/service/ec2/api.go | 2018 ++++-
 .../aws/aws-sdk-go/service/s3/api.go | 2132 ++++-
 .../aws/aws-sdk-go/service/s3/errors.go | 6 +
 .../service/s3/s3iface/interface.go | 16 +
 .../cortex/pkg/alertmanager/alertmanager.go | 24 +-
 .../pkg/alertmanager/alertmanager_metrics.go | 61 +-
 .../cortex/pkg/alertmanager/api.go | 8 +-
 .../cortex/pkg/alertmanager/multitenant.go | 78 +-
 .../cortex/pkg/alertmanager/storage.go | 10 +
 .../cortexproject/cortex/pkg/api/api.go | 217 +-
 .../cortexproject/cortex/pkg/api/handlers.go | 143 +-
 .../cortex/pkg/api/middlewares.go | 4 +-
 .../pkg/chunk/aws/dynamodb_storage_client.go | 8 +
 .../cortex/pkg/chunk/aws/s3_storage_client.go | 75 +-
 .../pkg/chunk/cassandra/storage_client.go | 28 +-
 .../cortexproject/cortex/pkg/chunk/chunk.go | 55 +-
 .../cortex/pkg/chunk/chunk_store.go | 31 +-
 .../cortex/pkg/chunk/encoding/bigchunk.go | 4 +
 .../cortex/pkg/chunk/encoding/chunk.go | 54 +-
 .../cortex/pkg/chunk/encoding/doubledelta.go | 4 +
 .../cortex/pkg/chunk/encoding/varbit.go | 4 +
 .../cortex/pkg/chunk/grpc/grpc_client.go | 2 +-
 .../pkg/chunk/purger/blocks_purger_api.go | 121 +
 .../pkg/chunk/purger/delete_requests_store.go | 31 +-
 .../cortex/pkg/chunk/purger/purger.go | 4 +-
 .../pkg/chunk/purger/request_handler.go | 8 +-
 .../cortex/pkg/chunk/schema_util.go | 33 +-
 .../pkg/chunk/storage/caching_index_client.go | 4 +-
 .../cortex/pkg/chunk/storage/factory.go | 3 +
.../cortex/pkg/chunk/table_manager.go | 8 +- .../cortex/pkg/compactor/blocks_cleaner.go | 88 +- .../cortex/pkg/compactor/compactor.go | 229 +- .../cortex/pkg/compactor/compactor_ring.go | 10 +- .../cortex/pkg/compactor/users_scanner.go | 47 - .../cortex/pkg/configs/api/api.go | 10 +- .../cortexproject/cortex/pkg/cortex/cortex.go | 127 +- .../cortex/pkg/cortex/modules.go | 363 +- .../cortex/pkg/distributor/distributor.go | 37 +- .../pkg/distributor/distributor_ring.go | 2 +- .../cortex/pkg/distributor/query.go | 14 +- .../cortex/pkg/frontend/config.go | 80 + .../pkg/frontend/downstream_roundtripper.go | 40 + .../cortex/pkg/frontend/transport/handler.go | 202 + .../pkg/frontend/transport/roundtripper.go | 47 + .../cortex/pkg/frontend/v1/frontend.go | 288 + .../v1/frontendv1pb}/frontend.pb.go | 131 +- .../v1/frontendv1pb}/frontend.proto | 6 +- .../cortex/pkg/frontend/v2/frontend.go | 315 + .../frontend/v2/frontend_scheduler_worker.go | 327 + .../frontend/v2/frontendv2pb/frontend.pb.go | 782 ++ .../frontend/v2/frontendv2pb/frontend.proto | 28 + .../cortex/pkg/ingester/client/cortex.pb.go | 384 +- .../cortex/pkg/ingester/client/cortex.proto | 6 +- .../cortex/pkg/ingester/ingester.go | 22 +- .../cortex/pkg/ingester/ingester_v2.go | 623 +- .../cortex/pkg/ingester/metrics.go | 88 +- .../cortex/pkg/ingester/user_state.go | 4 +- .../cortexproject/cortex/pkg/ingester/wal.go | 7 +- .../cortex/pkg/querier/block_meta.go | 52 - .../pkg/querier/blocks_consistency_checker.go | 19 +- .../cortex/pkg/querier/blocks_scanner.go | 95 +- .../pkg/querier/blocks_store_queryable.go | 431 +- .../pkg/querier/chunk_store_queryable.go | 12 +- .../cortex/pkg/querier/chunks_handler.go | 4 +- .../pkg/querier/distributor_queryable.go | 15 +- .../cortex/pkg/querier/frontend/frontend.go | 531 -- .../cortex/pkg/querier/frontend/worker.go | 219 - .../frontend/worker_frontend_manager.go | 172 - .../cortex/pkg/querier/querier.go | 200 +- .../cortex/pkg/querier/queryrange/limits.go | 113 +- .../pkg/querier/queryrange/query_range.go | 21 +- .../pkg/querier/queryrange/results_cache.go | 4 +- .../cortex/pkg/querier/queryrange/retry.go | 4 +- .../pkg/querier/queryrange/roundtrip.go | 25 +- .../cortex/pkg/querier/queryrange/util.go | 71 + .../cortex/pkg/querier/stats/stats.go | 62 + .../cortex/pkg/querier/stats/stats.pb.go | 414 + .../cortex/pkg/querier/stats/stats.proto | 16 + .../pkg/querier/stats/time_middleware.go | 29 + .../pkg/querier/worker/frontend_processor.go | 139 + .../pkg/querier/worker/processor_manager.go | 75 + .../pkg/querier/worker/scheduler_processor.go | 219 + .../cortex/pkg/querier/worker/worker.go | 269 + .../pkg/ring/client/ring_service_discovery.go | 2 +- .../cortex/pkg/ring/lifecycler.go | 55 +- .../cortexproject/cortex/pkg/ring/model.go | 3 + .../cortex/pkg/ring/replication_set.go | 72 +- .../pkg/ring/replication_set_tracker.go | 96 + .../cortex/pkg/ring/replication_strategy.go | 23 +- .../cortexproject/cortex/pkg/ring/ring.go | 126 +- .../cortexproject/cortex/pkg/ring/util.go | 34 + .../cortexproject/cortex/pkg/ruler/api.go | 11 +- .../cortexproject/cortex/pkg/ruler/manager.go | 2 +- .../cortex/pkg/ruler/manager_metrics.go | 42 +- .../cortexproject/cortex/pkg/ruler/ruler.go | 28 +- .../cortexproject/cortex/pkg/ruler/storage.go | 3 + .../cortex/pkg/scheduler/queue/queue.go | 189 + .../queue/user_queues.go} | 10 +- .../cortex/pkg/scheduler/scheduler.go | 460 + .../pkg/scheduler/schedulerpb/scheduler.pb.go | 1809 ++++ .../pkg/scheduler/schedulerpb/scheduler.proto | 85 + .../azure/bucket_client.go | 0 
.../{backend => bucket}/azure/config.go | 6 +- .../cortex/pkg/storage/bucket/client.go | 132 + .../client_mock.go} | 38 +- .../filesystem/bucket_client.go | 0 .../{backend => bucket}/filesystem/config.go | 6 +- .../{backend => bucket}/gcs/bucket_client.go | 0 .../storage/{backend => bucket}/gcs/config.go | 6 +- .../{backend => bucket}/s3/bucket_client.go | 2 + .../storage/{backend => bucket}/s3/config.go | 42 +- .../pkg/storage/bucket/swift/bucket_client.go | 37 + .../cortex/pkg/storage/bucket/swift/config.go | 46 + .../{tsdb => bucket}/user_bucket_client.go | 2 +- .../cortex/pkg/storage/tsdb/bucket_client.go | 57 - .../pkg/storage/tsdb/bucketindex/index.go | 199 + .../pkg/storage/tsdb/bucketindex/markers.go | 38 + .../tsdb/bucketindex/markers_bucket_client.go | 121 + .../pkg/storage/tsdb/bucketindex/reader.go | 50 + .../pkg/storage/tsdb/bucketindex/writer.go | 255 + .../cortex/pkg/storage/tsdb/caching_bucket.go | 11 +- .../cortex/pkg/storage/tsdb/config.go | 97 +- .../cortex/pkg/storage/tsdb/index_cache.go | 2 +- .../storage/tsdb/memcache_client_config.go | 2 +- .../pkg/storage/tsdb/tenant_deletion_mark.go | 40 + .../cortex/pkg/storage/tsdb/users_scanner.go | 72 + .../pkg/storegateway/bucket_store_metrics.go | 65 +- .../cortex/pkg/storegateway/bucket_stores.go | 46 +- .../cortex/pkg/storegateway/gateway.go | 17 +- .../storegateway/metadata_fetcher_metrics.go | 27 +- .../storegateway/storegatewaypb/gateway.pb.go | 103 +- .../storegateway/storegatewaypb/gateway.proto | 6 + .../cortex/pkg/tenant/resolver.go | 132 + .../cortexproject/cortex/pkg/tenant/tenant.go | 89 + .../cortex/pkg/util/concurrency/buffer.go | 25 + .../cortex/pkg/util/concurrency/runner.go | 64 + .../cortex/pkg/util/dns_watcher.go | 82 + .../cortexproject/cortex/pkg/util/errors.go | 4 +- .../cortex/pkg/util/fakeauth/fake_auth.go | 22 +- .../cortex/pkg/util/grpcclient/grpcclient.go | 7 + .../cortex/pkg/util/grpcutil/carrier.go | 40 + .../cortexproject/cortex/pkg/util/log.go | 5 +- .../cortex/pkg/util/metrics_helper.go | 263 +- .../cortex/pkg/util/modules/modules.go | 2 +- .../cortex/pkg/util/process/collector.go | 132 + .../cortex/pkg/util/spanlogger/spanlogger.go | 44 +- .../cortex/pkg/util/test/poll.go | 2 +- .../cortexproject/cortex/pkg/util/time.go | 11 + .../cortex/pkg/util/validation/limits.go | 24 +- .../cortex/pkg/util/validation/validate.go | 8 +- .../github.com/digitalocean/godo/CHANGELOG.md | 38 + .../digitalocean/godo/CONTRIBUTING.md | 25 +- .../github.com/digitalocean/godo/apps.gen.go | 144 +- vendor/github.com/digitalocean/godo/apps.go | 120 +- .../github.com/digitalocean/godo/databases.go | 5 +- .../github.com/digitalocean/godo/domains.go | 1 + .../github.com/digitalocean/godo/droplets.go | 1 + .../github.com/digitalocean/godo/firewalls.go | 1 + .../digitalocean/godo/floating_ips.go | 1 + vendor/github.com/digitalocean/godo/godo.go | 2 +- vendor/github.com/digitalocean/godo/images.go | 1 + .../github.com/digitalocean/godo/invoices.go | 6 +- .../digitalocean/godo/kubernetes.go | 83 + .../digitalocean/godo/load_balancers.go | 1 + .../github.com/digitalocean/godo/projects.go | 3 +- .../github.com/digitalocean/godo/registry.go | 215 +- .../github.com/digitalocean/godo/storage.go | 1 + .../github.com/digitalocean/godo/strings.go | 2 + vendor/github.com/digitalocean/godo/vpcs.go | 15 + .../go-openapi/strfmt/.golangci.yml | 9 + .../github.com/go-openapi/strfmt/.travis.yml | 24 +- vendor/github.com/go-openapi/strfmt/format.go | 2 +- vendor/github.com/go-openapi/strfmt/go.mod | 13 +- 
vendor/github.com/go-openapi/strfmt/go.sum | 127 +- vendor/github.com/go-openapi/strfmt/time.go | 29 +- .../golang/protobuf/jsonpb/encode.go | 11 +- .../golang/protobuf/proto/text_decode.go | 2 +- .../gophercloud/gophercloud/CHANGELOG.md | 32 +- .../gophercloud/gophercloud/README.md | 7 + .../openstack/compute/v2/servers/requests.go | 13 + vendor/github.com/lann/builder/.travis.yml | 5 +- vendor/github.com/lann/builder/LICENSE | 21 + vendor/github.com/lann/builder/registry.go | 16 +- vendor/github.com/miekg/dns/.travis.yml | 4 +- vendor/github.com/miekg/dns/README.md | 8 +- vendor/github.com/miekg/dns/client.go | 21 +- vendor/github.com/miekg/dns/dnssec.go | 46 +- vendor/github.com/miekg/dns/dnssec_keygen.go | 4 +- vendor/github.com/miekg/dns/dnssec_keyscan.go | 18 +- vendor/github.com/miekg/dns/dnssec_privkey.go | 20 +- vendor/github.com/miekg/dns/doc.go | 2 +- vendor/github.com/miekg/dns/msg_helpers.go | 67 + vendor/github.com/miekg/dns/msg_truncate.go | 11 +- vendor/github.com/miekg/dns/scan.go | 29 +- vendor/github.com/miekg/dns/scan_rr.go | 19 +- vendor/github.com/miekg/dns/serve_mux.go | 4 +- vendor/github.com/miekg/dns/server.go | 110 +- vendor/github.com/miekg/dns/sig0.go | 14 +- vendor/github.com/miekg/dns/svcb.go | 744 ++ vendor/github.com/miekg/dns/tsig.go | 8 +- vendor/github.com/miekg/dns/types.go | 2 + vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/miekg/dns/zduplicate.go | 42 + vendor/github.com/miekg/dns/zmsg.go | 82 + vendor/github.com/miekg/dns/ztypes.go | 25 + .../mitchellh/mapstructure/.travis.yml | 2 +- .../mitchellh/mapstructure/CHANGELOG.md | 21 + .../mitchellh/mapstructure/mapstructure.go | 172 +- .../prometheus/alertmanager/api/v1/api.go | 2 +- .../prometheus/alertmanager/api/v2/api.go | 2 +- .../alertmanager/asset/assets_vfsdata.go | 4 +- .../alertmanager/config/coordinator.go | 13 +- .../prometheus/alertmanager/notify/notify.go | 52 +- .../notify/pagerduty/pagerduty.go | 30 +- .../client_golang/api/prometheus/v1/api.go | 68 +- .../client_golang/prometheus/counter.go | 20 +- .../client_golang/prometheus/desc.go | 2 +- .../client_golang/prometheus/gauge.go | 20 +- .../client_golang/prometheus/go_collector.go | 7 +- .../client_golang/prometheus/histogram.go | 22 +- .../client_golang/prometheus/metric.go | 2 +- .../client_golang/prometheus/summary.go | 26 +- .../client_golang/prometheus/value.go | 13 +- .../client_golang/prometheus/vec.go | 114 +- .../client_golang/prometheus/wrap.go | 4 +- .../prometheus/common/expfmt/text_parse.go | 11 + .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../github.com/prometheus/procfs/cpuinfo.go | 44 + .../{cpuinfo_arm64.go => cpuinfo_armx.go} | 2 +- .../prometheus/procfs/cpuinfo_mips.go | 18 - .../prometheus/procfs/cpuinfo_mips64.go | 18 - .../prometheus/procfs/cpuinfo_mipsle.go | 18 - .../{cpuinfo_mips64le.go => cpuinfo_mipsx.go} | 1 + .../{cpuinfo_arm.go => cpuinfo_others.go} | 3 +- .../prometheus/procfs/cpuinfo_ppc64le.go | 18 - .../{cpuinfo_ppc64.go => cpuinfo_ppcx.go} | 1 + .../{cpuinfo_default.go => cpuinfo_x86.go} | 0 .../prometheus/procfs/fixtures.ttar | 75 +- .../prometheus/procfs/kernel_random.go | 2 +- vendor/github.com/prometheus/procfs/mdstat.go | 5 +- .../prometheus/procfs/proc_status.go | 6 +- .../prometheus/prometheus/config/config.go | 21 +- .../prometheus/discovery/discovery.go | 2 +- .../discovery/dockerswarm/dockerswarm.go | 30 +- .../discovery/dockerswarm/network.go | 1 + .../prometheus/discovery/dockerswarm/nodes.go | 2 +- .../discovery/dockerswarm/services.go | 2 +- 
.../prometheus/discovery/dockerswarm/tasks.go | 2 +- .../discovery/openstack/instance.go | 1 + .../prometheus/discovery/registry.go | 3 +- .../prometheus/notifier/notifier.go | 4 +- .../prometheus/pkg/labels/labels.go | 36 +- .../prometheus/pkg/textparse/interface.go | 16 +- .../prometheus/prometheus/prompb/remote.pb.go | 133 +- .../prometheus/prometheus/prompb/remote.proto | 4 + .../prometheus/prometheus/prompb/types.pb.go | 467 +- .../prometheus/prometheus/prompb/types.proto | 20 + .../prometheus/prometheus/promql/engine.go | 1 + .../promql/parser/generated_parser.y.go | 7 +- .../prometheus/promql/parser/parse.go | 2 +- .../prometheus/promql/parser/printer.go | 1 + .../prometheus/prometheus/promql/test.go | 1 + .../prometheus/prometheus/promql/value.go | 3 +- .../prometheus/prometheus/rules/alerting.go | 6 +- .../prometheus/prometheus/scrape/manager.go | 2 +- .../prometheus/prometheus/scrape/scrape.go | 50 +- .../prometheus/prometheus/scrape/target.go | 1 + .../prometheus/prometheus/storage/fanout.go | 10 +- .../prometheus/prometheus/storage/merge.go | 15 +- .../prometheus/storage/remote/client.go | 2 +- .../prometheus/storage/remote/codec.go | 14 + .../prometheus/storage/remote/intern.go | 3 +- .../remote/{max_gauge.go => max_timestamp.go} | 12 +- .../storage/remote/metadata_watcher.go | 163 + .../storage/remote/queue_manager.go | 275 +- .../prometheus/storage/remote/read.go | 1 + .../prometheus/storage/remote/storage.go | 13 +- .../prometheus/storage/remote/write.go | 13 +- .../prometheus/template/template.go | 7 +- .../prometheus/prometheus/tsdb/CHANGELOG.md | 2 +- .../prometheus/prometheus/tsdb/README.md | 8 +- .../prometheus/prometheus/tsdb/block.go | 29 +- .../prometheus/prometheus/tsdb/blockwriter.go | 3 +- .../prometheus/tsdb/chunkenc/chunk.go | 7 +- .../prometheus/tsdb/chunks/chunks.go | 40 +- .../prometheus/tsdb/chunks/head_chunks.go | 124 +- .../prometheus/prometheus/tsdb/compact.go | 47 +- .../prometheus/prometheus/tsdb/db.go | 244 +- .../prometheus/tsdb/errors/errors.go | 77 +- .../prometheus/tsdb/fileutil/mmap.go | 2 +- .../prometheus/prometheus/tsdb/head.go | 68 +- .../prometheus/prometheus/tsdb/index/index.go | 58 +- .../prometheus/prometheus/tsdb/querier.go | 66 +- .../prometheus/tsdb/record/record.go | 1 + .../prometheus/prometheus/tsdb/repair.go | 17 +- .../prometheus/tsdb/tombstones/tombstones.go | 7 +- .../prometheus/tsdb/tsdbblockutil.go | 1 + .../prometheus/prometheus/tsdb/wal.go | 1 + .../prometheus/tsdb/wal/checkpoint.go | 7 +- .../prometheus/prometheus/tsdb/wal/wal.go | 25 +- .../prometheus/prometheus/tsdb/wal/watcher.go | 1 + .../prometheus/util/testutil/directory.go | 18 +- .../prometheus/util/testutil/testing.go | 129 - .../prometheus/prometheus/web/api/v1/api.go | 5 +- .../thanos-io/thanos/pkg/block/block.go | 97 +- .../thanos-io/thanos/pkg/block/fetcher.go | 111 +- .../thanos-io/thanos/pkg/block/index.go | 156 +- .../pkg/block/indexheader/binary_reader.go | 8 +- .../thanos/pkg/block/indexheader/header.go | 4 +- .../block/indexheader/lazy_binary_reader.go | 273 + .../pkg/block/indexheader/reader_pool.go | 147 + .../thanos/pkg/block/metadata/deletionmark.go | 76 - .../thanos/pkg/block/metadata/markers.go | 119 + .../thanos/pkg/block/metadata/meta.go | 120 +- .../thanos-io/thanos/pkg/block/writer.go | 184 + .../thanos-io/thanos/pkg/compact/compact.go | 248 +- .../pkg/compact/downsample/downsample.go | 4 +- .../downsample/streamed_block_writer.go | 10 +- .../thanos-io/thanos/pkg/compact/planner.go | 303 + .../thanos-io/thanos/pkg/compact/retention.go | 3 +- 
.../thanos/pkg/component/component.go | 2 + .../thanos/pkg/discovery/dns/provider.go | 4 +- .../thanos/pkg/errutil/multierror.go.go | 51 + .../thanos/pkg/objstore/azure/helpers.go | 11 + .../pkg/objstore/filesystem/filesystem.go | 4 +- .../thanos-io/thanos/pkg/objstore/objstore.go | 2 +- .../thanos-io/thanos/pkg/objstore/s3/s3.go | 72 +- .../thanos/pkg/promclient/promclient.go | 33 +- .../thanos-io/thanos/pkg/runutil/runutil.go | 7 +- .../thanos-io/thanos/pkg/shipper/shipper.go | 4 +- .../thanos-io/thanos/pkg/store/bucket.go | 345 +- .../thanos/pkg/store/hintspb/custom.go | 12 + .../thanos/pkg/store/hintspb/hints.pb.go | 869 +- .../thanos/pkg/store/hintspb/hints.proto | 25 + .../thanos-io/thanos/pkg/store/multitsdb.go | 6 +- .../thanos/pkg/store/storepb/rpc.pb.go | 357 +- .../thanos/pkg/store/storepb/rpc.proto | 20 + .../pkg/store/storepb/testutil/series.go | 258 - .../thanos-io/thanos/pkg/store/tsdb.go | 5 +- .../common/middleware/http_tracing.go | 15 + .../common/middleware/instrument.go | 13 +- .../weaveworks/common/server/server.go | 30 +- .../bson/bsoncodec/byte_slice_codec.go | 3 + .../bson/bsoncodec/cond_addr_codec.go | 63 + .../bson/bsoncodec/default_value_decoders.go | 119 +- .../bson/bsoncodec/default_value_encoders.go | 33 +- .../mongo-driver/bson/bsoncodec/map_codec.go | 135 +- .../bson/bsoncodec/pointer_codec.go | 9 +- .../mongo-driver/bson/bsoncodec/registry.go | 44 +- .../bson/bsoncodec/slice_codec.go | 7 +- .../bson/bsoncodec/string_codec.go | 4 + .../bson/bsoncodec/struct_codec.go | 71 +- .../mongo-driver/bson/bsoncodec/time_codec.go | 8 +- .../mongo-driver/bson/bsoncodec/uint_codec.go | 4 + .../bson/bsonoptions/map_codec_options.go | 21 +- .../bson/bsonrw/extjson_wrappers.go | 2 +- .../go.mongodb.org/mongo-driver/bson/doc.go | 15 +- .../mongo-driver/bson/primitive/objectid.go | 19 +- .../mongo-driver/bson/primitive/primitive.go | 23 +- .../mongo-driver/bson/raw_value.go | 12 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 7 +- .../mongo-driver/x/bsonx/bsoncore/document.go | 3 +- .../golang.org/x/net/http/httpproxy/proxy.go | 6 +- vendor/golang.org/x/net/http2/transport.go | 31 +- .../idna/{tables12.00.go => tables12.0.0.go} | 2 +- vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 ++++++++++ .../x/net/internal/socket/cmsghdr.go | 2 +- .../x/net/internal/socket/cmsghdr_stub.go | 2 +- .../net/internal/socket/cmsghdr_zos_s390x.go | 25 + .../x/net/internal/socket/error_unix.go | 2 +- .../x/net/internal/socket/iovec_64bit.go | 2 +- .../x/net/internal/socket/iovec_stub.go | 2 +- .../x/net/internal/socket/msghdr_stub.go | 2 +- .../x/net/internal/socket/msghdr_zos_s390x.go | 36 + .../x/net/internal/socket/rawconn_msg.go | 7 +- .../x/net/internal/socket/rawconn_nomsg.go | 2 +- .../x/net/internal/socket/sys_const_zos.go | 17 + .../x/net/internal/socket/sys_posix.go | 2 +- .../x/net/internal/socket/sys_stub.go | 2 +- .../x/net/internal/socket/sys_zos_s390x.go | 38 + .../x/net/internal/socket/sys_zos_s390x.s | 11 + .../x/net/internal/socket/zsys_zos_s390x.go | 32 + vendor/golang.org/x/net/ipv4/control_stub.go | 2 +- vendor/golang.org/x/net/ipv4/control_zos.go | 86 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 2 +- .../golang.org/x/net/ipv4/payload_nocmsg.go | 2 +- vendor/golang.org/x/net/ipv4/sockopt_posix.go | 2 +- vendor/golang.org/x/net/ipv4/sockopt_stub.go | 2 +- vendor/golang.org/x/net/ipv4/sys_stub.go | 2 +- vendor/golang.org/x/net/ipv4/sys_zos.go | 55 + .../golang.org/x/net/ipv4/zsys_zos_s390x.go | 80 + .../x/net/ipv6/control_rfc3542_unix.go | 2 +- 
vendor/golang.org/x/net/ipv6/control_stub.go | 2 +- vendor/golang.org/x/net/ipv6/control_unix.go | 2 +- vendor/golang.org/x/net/ipv6/icmp_stub.go | 2 +- vendor/golang.org/x/net/ipv6/icmp_zos.go | 29 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 2 +- .../golang.org/x/net/ipv6/payload_nocmsg.go | 2 +- vendor/golang.org/x/net/ipv6/sockopt_posix.go | 2 +- vendor/golang.org/x/net/ipv6/sockopt_stub.go | 2 +- vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 2 +- .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 2 +- vendor/golang.org/x/net/ipv6/sys_stub.go | 2 +- vendor/golang.org/x/net/ipv6/sys_zos.go | 70 + .../golang.org/x/net/ipv6/zsys_zos_s390x.go | 106 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 2 +- vendor/golang.org/x/sys/cpu/cpu_arm64.go | 39 +- vendor/golang.org/x/sys/cpu/cpu_arm64.s | 2 +- vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 2 +- .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 121 +- .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 3 +- .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 12 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 150 +- vendor/golang.org/x/sys/cpu/cpu_s390x.s | 2 +- vendor/golang.org/x/sys/cpu/cpu_x86.s | 2 +- vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 2 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 2 +- vendor/golang.org/x/sys/unix/asm_darwin_386.s | 2 +- .../golang.org/x/sys/unix/asm_darwin_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 2 +- .../golang.org/x/sys/unix/asm_darwin_arm64.s | 2 +- .../x/sys/unix/asm_dragonfly_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 2 +- vendor/golang.org/x/sys/unix/asm_linux_386.s | 2 +- .../golang.org/x/sys/unix/asm_linux_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_linux_arm.s | 2 +- .../golang.org/x/sys/unix/asm_linux_arm64.s | 2 +- .../golang.org/x/sys/unix/asm_linux_mips64x.s | 2 +- .../golang.org/x/sys/unix/asm_linux_mipsx.s | 2 +- .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 2 +- .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 +- .../golang.org/x/sys/unix/asm_linux_s390x.s | 2 +- vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 2 +- vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_386.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_arm.s | 2 +- .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 2 +- .../x/sys/unix/asm_openbsd_mips64.s | 2 +- .../golang.org/x/sys/unix/asm_solaris_amd64.s | 2 +- vendor/golang.org/x/sys/unix/endian_big.go | 2 +- vendor/golang.org/x/sys/unix/endian_little.go | 2 +- .../x/sys/unix/fcntl_linux_32bit.go | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 8 + vendor/golang.org/x/sys/unix/syscall.go | 43 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 16 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 2 +- .../golang.org/x/sys/unix/syscall_darwin.go | 62 +- .../x/sys/unix/syscall_darwin_386.go | 7 +- .../x/sys/unix/syscall_darwin_amd64.go | 7 +- .../x/sys/unix/syscall_darwin_arm.go | 4 +- .../x/sys/unix/syscall_darwin_arm64.go | 9 +- 
.../x/sys/unix/syscall_dragonfly.go | 17 + .../golang.org/x/sys/unix/syscall_freebsd.go | 4 + .../x/sys/unix/syscall_linux_386.go | 3 - .../x/sys/unix/syscall_linux_amd64_gc.go | 2 +- .../golang.org/x/sys/unix/syscall_linux_gc.go | 2 +- .../x/sys/unix/syscall_linux_gc_386.go | 2 +- .../x/sys/unix/syscall_linux_gc_arm.go | 2 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 4 + .../golang.org/x/sys/unix/syscall_openbsd.go | 4 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 +- .../x/sys/unix/syscall_unix_gc_ppc64x.go | 2 +- .../x/sys/unix/zerrors_darwin_386.go | 2 + .../x/sys/unix/zerrors_darwin_amd64.go | 2 + .../x/sys/unix/zerrors_darwin_arm.go | 2 + .../x/sys/unix/zerrors_darwin_arm64.go | 2 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 16 +- .../x/sys/unix/zerrors_solaris_amd64.go | 22 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 2 +- .../x/sys/unix/zsyscall_darwin_386.go | 30 +- .../x/sys/unix/zsyscall_darwin_386.s | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 30 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 4 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 15 + .../x/sys/unix/zsyscall_darwin_arm64.s | 2 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 + .../x/sys/unix/zsysnum_darwin_386.go | 437 + .../x/sys/unix/zsysnum_darwin_amd64.go | 439 + .../x/sys/unix/zsysnum_darwin_arm.go | 437 + .../x/sys/unix/zsysnum_darwin_arm64.go | 437 + .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../x/sys/unix/ztypes_darwin_386.go | 15 + .../x/sys/unix/ztypes_darwin_amd64.go | 15 + .../x/sys/unix/ztypes_darwin_arm.go | 15 + .../x/sys/unix/ztypes_darwin_arm64.go | 15 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 709 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 15 + .../x/sys/unix/ztypes_linux_amd64.go | 16 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 16 + .../x/sys/unix/ztypes_linux_arm64.go | 16 + .../x/sys/unix/ztypes_linux_mips.go | 16 + .../x/sys/unix/ztypes_linux_mips64.go | 16 + .../x/sys/unix/ztypes_linux_mips64le.go | 16 + .../x/sys/unix/ztypes_linux_mipsle.go | 16 + .../x/sys/unix/ztypes_linux_ppc64.go | 16 + .../x/sys/unix/ztypes_linux_ppc64le.go | 16 + .../x/sys/unix/ztypes_linux_riscv64.go | 16 + .../x/sys/unix/ztypes_linux_s390x.go | 16 + .../x/sys/unix/ztypes_linux_sparc64.go | 16 + .../x/sys/unix/ztypes_solaris_amd64.go | 31 +- .../golang.org/x/sys/windows/dll_windows.go | 2 + .../x/sys/windows/memory_windows.go | 20 +- .../sys/windows/registry/zsyscall_windows.go | 39 +- .../x/sys/windows/security_windows.go | 3 +- .../x/sys/windows/setupapierrors_windows.go | 100 + vendor/golang.org/x/sys/windows/syscall.go | 46 +- .../x/sys/windows/syscall_windows.go | 34 +- .../golang.org/x/sys/windows/types_windows.go | 29 + .../x/sys/windows/zsyscall_windows.go | 4370 ++++------ .../x/text/unicode/bidi/tables12.0.0.go | 2 +- .../x/text/unicode/bidi/tables13.0.0.go | 1955 +++++ .../x/text/unicode/norm/tables12.0.0.go | 2 +- .../x/text/unicode/norm/tables13.0.0.go | 7760 +++++++++++++++++ .../golang.org/x/text/width/tables12.0.0.go | 2 +- 
.../golang.org/x/text/width/tables13.0.0.go | 1351 +++ .../x/tools/internal/gocommand/invoke.go | 59 +- .../x/tools/internal/gocommand/version.go | 40 + .../x/tools/internal/imports/fix.go | 34 +- .../x/tools/internal/imports/mod.go | 35 +- .../v1/cloudresourcemanager-api.json | 73 +- .../v1/cloudresourcemanager-gen.go | 178 +- .../api/compute/v1/compute-api.json | 1744 +++- .../api/compute/v1/compute-gen.go | 4181 +++++++-- .../api/internal/service-account.json | 12 - .../api/internal/settings.go | 1 + .../option/internaloption/internaloption.go | 15 + .../api/storage/v1/storage-api.json | 12 +- .../api/storage/v1/storage-gen.go | 122 +- .../api/transport/grpc/dial.go | 69 +- .../api/transport/http/dial.go | 100 +- .../api/transport/internal/dca/dca.go | 145 + vendor/k8s.io/api/apps/v1beta1/types.go | 16 +- .../zz_generated.prerelease-lifecycle.go | 16 +- vendor/k8s.io/api/apps/v1beta2/types.go | 22 +- .../zz_generated.prerelease-lifecycle.go | 22 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 24 +- .../zz_generated.prerelease-lifecycle.go | 24 +- vendor/k8s.io/klog/v2/README.md | 6 +- vendor/k8s.io/klog/v2/SECURITY.md | 22 + vendor/k8s.io/klog/v2/klog.go | 168 +- vendor/modules.txt | 103 +- 596 files changed, 53000 insertions(+), 10107 deletions(-) create mode 100644 vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/compactor/users_scanner.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/config.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go rename vendor/github.com/cortexproject/cortex/pkg/{querier/frontend => frontend/v1/frontendv1pb}/frontend.pb.go (82%) rename vendor/github.com/cortexproject/cortex/pkg/{querier/frontend => frontend/v1/frontendv1pb}/frontend.proto (74%) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/block_meta.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker_frontend_manager.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go create mode 100644 
vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go rename vendor/github.com/cortexproject/cortex/pkg/{querier/frontend/frontend_querier_queues.go => scheduler/queue/user_queues.go} (97%) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/azure/bucket_client.go (100%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/azure/config.go (86%) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go rename vendor/github.com/cortexproject/cortex/pkg/storage/{tsdb/bucket_client_mock.go => bucket/client_mock.go} (66%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/filesystem/bucket_client.go (100%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/filesystem/config.go (65%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/gcs/bucket_client.go (100%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/gcs/config.go (78%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/s3/bucket_client.go (90%) rename vendor/github.com/cortexproject/cortex/pkg/storage/{backend => bucket}/s3/config.go (60%) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go rename vendor/github.com/cortexproject/cortex/pkg/storage/{tsdb => bucket}/user_bucket_client.go (99%) delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/dns_watcher.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/carrier.go create mode 100644 
vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go create mode 100644 vendor/github.com/lann/builder/LICENSE create mode 100644 vendor/github.com/miekg/dns/svcb.go create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md rename vendor/github.com/prometheus/procfs/{cpuinfo_arm64.go => cpuinfo_armx.go} (97%) delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go rename vendor/github.com/prometheus/procfs/{cpuinfo_mips64le.go => cpuinfo_mipsx.go} (94%) rename vendor/github.com/prometheus/procfs/{cpuinfo_arm.go => cpuinfo_others.go} (82%) delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go rename vendor/github.com/prometheus/procfs/{cpuinfo_ppc64.go => cpuinfo_ppcx.go} (96%) rename vendor/github.com/prometheus/procfs/{cpuinfo_default.go => cpuinfo_x86.go} (100%) rename vendor/github.com/prometheus/prometheus/storage/remote/{max_gauge.go => max_timestamp.go} (80%) create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/block/writer.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/compact/planner.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go delete mode 100644 vendor/github.com/thanos-io/thanos/pkg/store/storepb/testutil/series.go create mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go rename vendor/golang.org/x/net/idna/{tables12.00.go => tables12.0.0.go} (99%) create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_const_zos.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_zos_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_zos_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/control_zos.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_zos.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_zos_s390x.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_zos.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_zos.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 
vendor/golang.org/x/sys/windows/setupapierrors_windows.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables13.0.0.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go delete mode 100644 vendor/google.golang.org/api/internal/service-account.json create mode 100644 vendor/google.golang.org/api/transport/internal/dca/dca.go create mode 100644 vendor/k8s.io/klog/v2/SECURITY.md diff --git a/go.mod b/go.mod index 39ccb2aee5453..cf12ead0d1787 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/grafana/loki go 1.15 require ( + github.com/NYTimes/gziphandler v1.1.1 github.com/aws/aws-lambda-go v1.17.0 github.com/blang/semver v3.5.1+incompatible // indirect github.com/bmatcuk/doublestar v1.2.2 @@ -10,7 +11,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cortexproject/cortex v1.4.1-0.20201022071705-85942c5703cf + github.com/cortexproject/cortex v1.6.0 github.com/davecgh/go-spew v1.1.1 github.com/docker/docker v17.12.0-ce-rc1.0.20201009160326-9c15e82f19b0+incompatible github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect @@ -37,17 +38,17 @@ require ( github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible github.com/json-iterator/go v1.1.10 github.com/klauspost/compress v1.9.5 - github.com/mitchellh/mapstructure v1.2.2 + github.com/mitchellh/mapstructure v1.3.3 github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/opentracing/opentracing-go v1.2.0 // github.com/pierrec/lz4 v2.0.5+incompatible github.com/pierrec/lz4/v4 v4.0.2-0.20200813132121-22f5d580d5c4 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.7.1 + github.com/prometheus/client_golang v1.8.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.14.0 - github.com/prometheus/prometheus v1.8.2-0.20201014093524-73e2ce1bd643 + github.com/prometheus/common v0.15.0 + github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f github.com/segmentio/fasthash v1.0.2 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 @@ -55,12 +56,12 @@ require ( github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.25.0+incompatible github.com/ugorji/go v1.1.7 // indirect - github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 + github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 go.uber.org/atomic v1.7.0 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 - golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 - google.golang.org/grpc v1.32.0 + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + google.golang.org/grpc v1.33.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.3.0 @@ -87,3 +88,10 @@ replace google.golang.org/grpc => google.golang.org/grpc v1.29.1 // Same as Cortex // Using a 3rd-party branch for custom dialer - see https://github.com/bradfitz/gomemcache/pull/86 replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab + +// Fix errors like too many 
arguments in call to "github.com/go-openapi/errors".Required +// have (string, string) +// want (string, string, interface {}) +replace github.com/go-openapi/errors => github.com/go-openapi/errors v0.19.4 + +replace github.com/go-openapi/validate => github.com/go-openapi/validate v0.19.8 diff --git a/go.sum b/go.sum index 27a6e4b1520ab..d48822ae7064c 100644 --- a/go.sum +++ b/go.sum @@ -74,6 +74,8 @@ github.com/Azure/go-autorest/autorest v0.11.4 h1:iWJqGEvip7mjibEqC/srXNdo+4wLEPi github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.10 h1:j5sGbX7uj1ieYYkQ3Mpvewd4DCsEQ+ZeJpqnSM9pjnM= github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.11 h1:k/wzH9pA3hrtFNsEhJ5SqPEs75W3bzS8VOYA/fJ0j1k= +github.com/Azure/go-autorest/autorest v0.11.11/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -178,6 +180,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3 h1:SuCy7H3NLyp+1Mrfp+m80jcbi9KYWAs9/BXwppwRDzY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-lambda-go v1.17.0 h1:Ogihmi8BnpmCNktKAGpNwSiILNNING1MiosnKUfU8m0= @@ -193,8 +197,11 @@ github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.9 h1:cUGBW9CVdi0mS7K1hDzxIqTpfeWhpoQiguq81M1tjK0= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.5 h1:doSEOxC0UkirPcle20Rc+1kAhJ4Ip+GSEeZ3nKl7Qlk= github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go v1.35.31 h1:6tlaYq4Q311qfhft/fIaND33XI27aW3zIdictcHxifE= +github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -269,8 +276,10 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc 
github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0= -github.com/cortexproject/cortex v1.4.1-0.20201022071705-85942c5703cf h1:TGmSZFMNUP0U3bFMPeaVo1TFWlFaBtxrwS5Rs0zmFCs= -github.com/cortexproject/cortex v1.4.1-0.20201022071705-85942c5703cf/go.mod h1:MBJnS5mzVcHqivBp2391HpflMeMiT+f8r4VNkJlsZFs= +github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU= +github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg= +github.com/cortexproject/cortex v1.6.0 h1:/NOdjt80poIPchA9rItwYGeNt2ddxPqMNrCpnRP2iUg= +github.com/cortexproject/cortex v1.6.0/go.mod h1:QSi2ZZeKG3OoZ1+mJSthJK5fnMYAxPUnBEzt0c8Mk1Q= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= @@ -307,6 +316,8 @@ github.com/digitalocean/godo v1.42.1 h1:SJ/XMVsp5CZmyQal8gLlOl9jSl1i3FaN20LlgtK5 github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.46.0 h1:WRbwjATilgz2NE4NGMeSDpeicy9h4xSKNGuRJ/Nq/fA= github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.52.0 h1:1QSUC0w5T1wS1d/1uvPtG8GLeD0p/4zhx1Q+Fxtna+k= +github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -403,47 +414,35 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE= github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors 
v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.4 h1:fSGwO1tSYHFu70NKaWJt5Qh0qoBRtCm/mXS1yhf+0W0= github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls= github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15 h1:2GIefxs9Rx1vCDNghRtypRq+ig8KSLrjHbAYI/gCLCM= @@ -451,7 +450,6 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= 
github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= @@ -460,27 +458,23 @@ github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJN github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11 h1:0+YvbNh05rmBkgztd6zHp4OCFn7Mtu30bn46NQo2ZRw= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4azE= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72/go.mod h1:CJP1ZIHwhosNYwIdaHPZK9vHsM3+roNBaZ7U9Of1DXc= @@ -569,6 +563,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= 
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -616,6 +612,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7 h1:qYWTuM6SUNWgtvkhV8oH6GFHCpU+rKQOxPcepM3xKi0= github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201117184057-ae444373da19 h1:iFELRewmQ9CldLrqgr0E6b6ZPfZmMvLyyz6kMsR+c4w= +github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -642,6 +640,8 @@ github.com/gophercloud/gophercloud v0.12.0 h1:mZrie07npp6ODiwHZolTicr5jV8Ogn43Av github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.13.0 h1:1XkslZZRm6Ks0bLup+hBNth+KQf+0JA1UeoB7YKw9E8= github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= +github.com/gophercloud/gophercloud v0.14.0 h1:c2Byo+YMxhHlTJ3TPptjQ4dOQ1YknTHDJ/9zClDH+84= +github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= @@ -673,6 +673,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMc github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.15.0 h1:ntPNC9TD/6l2XDenJZe6T5lSMg95thpV9sGAqHX4WU8= github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= @@ -752,9 +754,12 @@ github.com/hetznercloud/hcloud-go v1.21.1 h1:LWNozxiZhKmeMqYbAS7KsAcPcxg47afCnTe github.com/hetznercloud/hcloud-go v1.21.1/go.mod 
h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hetznercloud/hcloud-go v1.22.0 h1:CC0jwkaBzwP4ObFE0sdJBTvGh5DE9kB/tuDETnRfOik= github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= +github.com/hetznercloud/hcloud-go v1.23.1 h1:SkYdCa6x458cMSDz5GI18iPz5j2hicACiDP6J/s/bTs= +github.com/hetznercloud/hcloud-go v1.23.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -861,6 +866,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 h1:yZJImkCmVI6d1uJ9KRRf/96YbFLDQ/hhs6Xt9Z3OBXI= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= @@ -877,7 +884,6 @@ github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -929,6 +935,8 @@ github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo= github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.35 h1:oTfOaDH+mZkdcgdIjH6yBajRGtIwcwcaR+rt23ZSrJs= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod 
h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= @@ -954,6 +962,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 h1:SPoLlS9qUUnXcIY4pvA4CTwYjk0Is5f4UPEkeESr53k= github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1054,6 +1064,7 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1082,6 +1093,8 @@ github.com/prometheus/alertmanager v0.21.0 h1:qK51JcUR9l/unhawGA9F9B64OCYfcGewhP github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939 h1:/gGoc4W45469qMuGGEMArYEs8wsk31/5oE56NUGjEN0= github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= +github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054 h1:NgCRBfzDpyIhX6Pjh7XSWPHUC8T5dA1yVuK/gwXM7Jw= +github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -1097,6 +1110,8 @@ github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83A github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod h1:25h+Uz1WvXDBZYwqGX8PAb71RBkcjxEVV/R5wGnsq4I= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1120,6 +1135,8 @@ github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4= github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1135,6 +1152,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= @@ -1145,8 +1164,12 @@ github.com/prometheus/prometheus v1.8.2-0.20200805082714-e0cf219f0de2/go.mod h1: github.com/prometheus/prometheus v1.8.2-0.20200819132913-cb830b0a9c78/go.mod h1:zfAqy/MwhMFajB9E2n12/9gG2fvofIE9uKDtlZCDxqs= github.com/prometheus/prometheus v1.8.2-0.20200923143134-7e2db3d092f3 h1:ETwF5e2G5PykV5usbsWoh1JfSbo50R07aMDdhTkC5SA= github.com/prometheus/prometheus v1.8.2-0.20200923143134-7e2db3d092f3/go.mod h1:9VNWoDFHOMovlubld5uKKxfCDcPBj2GMOCjcUFXkYaM= -github.com/prometheus/prometheus v1.8.2-0.20201014093524-73e2ce1bd643 h1:BDAexvKlOVjE5A8MlqRxzwkEpPl1/v6ydU1/J7kJtZc= -github.com/prometheus/prometheus v1.8.2-0.20201014093524-73e2ce1bd643/go.mod h1:XYjkJiog7fyQu3puQNivZPI2pNq1C/775EIoHfDvuvY= +github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ= +github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9 h1:T6pkPNGKXv21lLfgD/mnIABj9aOhmz8HphDmKllfKWs= +github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ= +github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= +github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f h1:OgXvmGvAEUIWgzBTrfXdEMCFxnXBirp32iF0TX2GvqY= +github.com/prometheus/prometheus 
v1.8.2-0.20201119181812-c8f810083d3f/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1249,6 +1272,10 @@ github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763 h1:c84P3YUu8bx github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg= github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a h1:4rNkFHeY+EIR7UdiYn5fZE7Q35Y3Dmae8q1Qbb90tcY= github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a/go.mod h1:A3qUEEbsVkplJnxyDLwuIuvTDaJPByTH+hMdTl9ujAA= +github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52 h1:z3hglXVwJ4HgU0OoDS+8+MvEipv/U83IQ+fMsDr00YQ= +github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ= +github.com/thanos-io/thanos v0.13.1-0.20201130180807-84afc97e7d58 h1:Q5t3TKhiFQ2J3XQv1psoMBSBk/Dx6p4JqoETXiWQaYg= +github.com/thanos-io/thanos v0.13.1-0.20201130180807-84afc97e7d58/go.mod h1:ffr9z+gefM664JBH/CEMHyHvShq2BQTejT/Ws+V+80Q= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1264,6 +1291,7 @@ github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:Q5IRRDY+ github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= @@ -1287,6 +1315,8 @@ github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9 h1:dNVIG9aKQHR9T github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 h1:MS5M2antM8wzMUqVxIfAi+yb6yjXvDINRFvLnmNXeIw= github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= +github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec h1:5JmevdpzK10Z2ua0VDToj7Kg2+/t0FzdYBjsurYRE8k= +github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= 
@@ -1324,6 +1354,8 @@ go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug= go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1364,7 +1396,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1437,7 +1468,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1476,6 +1506,9 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1486,6 +1519,8 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BG golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 h1:Mj83v+wSRNEar42a/MQgxk9X42TdEmrOl9i+y8WbxLo= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1498,6 +1533,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2By golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0= golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1512,7 +1549,6 @@ golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1584,6 +1620,9 @@ golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71 h1:ZPX6UakxrJCxWiyGWpXtFY+fp86Esy7xJT/jJCG8bgU= golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1593,6 +1632,8 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1674,8 +1715,10 @@ golang.org/x/tools v0.0.0-20200822203824-307de81be3f4 h1:r0nbB2EeRbGpnVeqxlkgiBp golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201008025239-9df69603baec h1:RY2OghEV/7X1MLaecgm1mwFd3sGvUddm5pGVSxQvX0c= -golang.org/x/tools v0.0.0-20201008025239-9df69603baec/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca h1:pvScuB+UnCGDas2naNKUOXruM08MjwVcEdaweeynIqQ= +golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c h1:EFNvrTleQM8C5s1RJh1h8mJzTEV7BJxtXTP+6qSofJY= +golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -1710,6 +1753,8 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1845,6 +1890,8 @@ k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= k8s.io/api v0.19.2/go.mod 
h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= @@ -1854,6 +1901,8 @@ k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -1869,6 +1918,8 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index a51c1619fecfa..9c796abcfd8e7 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -349,7 +349,11 @@ func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc) (r return result, nil } -func (r mockRing) GetAll(op ring.Operation) (ring.ReplicationSet, error) { +func (r mockRing) GetAllHealthy(op ring.Operation) (ring.ReplicationSet, error) { + return r.GetReplicationSetForOperation(op) +} + +func (r mockRing) GetReplicationSetForOperation(op ring.Operation) (ring.ReplicationSet, error) { return ring.ReplicationSet{ Ingesters: r.ingesters, MaxErrors: 1, diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go index 126318957406a..01207d63032bf 100644 --- a/pkg/ingester/checkpoint.go +++ b/pkg/ingester/checkpoint.go @@ -350,7 +350,7 @@ func (w *WALCheckpointWriter) deleteCheckpoints(maxIndex int) (err error) { } }() - var errs tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() files, err := ioutil.ReadDir(w.segmentWAL.Dir()) if err != nil { diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 13f60a1a43e21..21c1a8772cb7e 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -7,6 +7,9 @@ import ( "fmt" "net/http" + frontend "github.com/cortexproject/cortex/pkg/frontend/v1" + "github.com/cortexproject/cortex/pkg/querier/worker" + "github.com/grafana/loki/pkg/storage/stores/shipper/compactor" 
"github.com/cortexproject/cortex/pkg/util/flagext" @@ -15,7 +18,6 @@ import ( "github.com/weaveworks/common/signals" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" cortex_ruler "github.com/cortexproject/cortex/pkg/ruler" @@ -60,7 +62,7 @@ type Config struct { SchemaConfig storage.SchemaConfig `yaml:"schema_config,omitempty"` LimitsConfig validation.Limits `yaml:"limits_config,omitempty"` TableManager chunk.TableManagerConfig `yaml:"table_manager,omitempty"` - Worker frontend.WorkerConfig `yaml:"frontend_worker,omitempty"` + Worker worker.Config `yaml:"frontend_worker,omitempty"` Frontend lokifrontend.Config `yaml:"frontend,omitempty"` Ruler ruler.Config `yaml:"ruler,omitempty"` QueryRange queryrange.Config `yaml:"query_range,omitempty"` diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 99990ea0f25b4..e97487cca7f49 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -9,6 +9,11 @@ import ( "os" "time" + "github.com/NYTimes/gziphandler" + "github.com/cortexproject/cortex/pkg/frontend" + "github.com/cortexproject/cortex/pkg/frontend/transport" + "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" + "github.com/grafana/loki/pkg/ruler/manager" "github.com/grafana/loki/pkg/storage/stores/shipper/compactor" @@ -18,8 +23,7 @@ import ( cortex_storage "github.com/cortexproject/cortex/pkg/chunk/storage" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/cortex" - cortex_querier "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/frontend" + cortex_querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" @@ -159,11 +163,23 @@ func (t *Loki) initDistributor() (services.Service, error) { } func (t *Loki) initQuerier() (services.Service, error) { - level.Debug(util.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker)) - worker, err := frontend.NewWorker(t.cfg.Worker, cortex_querier.Config{MaxConcurrent: t.cfg.Querier.MaxConcurrent}, httpgrpc_server.NewServer(t.Server.HTTPServer.Handler), util.Logger) - if err != nil { - return nil, err + var ( + worker services.Service + err error + ) + + // NewQuerierWorker now expects Frontend (or Scheduler) address to be set. Loki only supports Frontend for now. + if t.cfg.Worker.FrontendAddress != "" { + // In case someone set scheduler address, we ignore it. 
+ t.cfg.Worker.SchedulerAddress = "" + t.cfg.Worker.MaxConcurrentRequests = t.cfg.Querier.MaxConcurrent + level.Debug(util.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker)) + worker, err = cortex_querier_worker.NewQuerierWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.Server.HTTPServer.Handler), util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } } + if t.cfg.Ingester.QueryStoreMaxLookBackPeriod != 0 { t.cfg.Querier.IngesterQueryStoreMaxLookback = t.cfg.Ingester.QueryStoreMaxLookBackPeriod } @@ -345,12 +361,23 @@ type disabledShuffleShardingLimits struct{} func (disabledShuffleShardingLimits) MaxQueriersPerUser(userID string) int { return 0 } func (t *Loki) initQueryFrontend() (_ services.Service, err error) { - level.Debug(util.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend)) - t.frontend, err = frontend.New(t.cfg.Frontend.Config, disabledShuffleShardingLimits{}, util.Logger, prometheus.DefaultRegisterer) + + roundTripper, frontendV1, _, err := frontend.InitFrontend(frontend.CombinedFrontendConfig{ + // Don't set FrontendV2 field to make sure that only frontendV1 can be initialized. + Handler: t.cfg.Frontend.Handler, + FrontendV1: t.cfg.Frontend.FrontendV1, + CompressResponses: t.cfg.Frontend.CompressResponses, + DownstreamURL: t.cfg.Frontend.DownstreamURL, + }, disabledShuffleShardingLimits{}, t.cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) if err != nil { - return + return nil, err } + t.frontend = frontendV1 + if t.frontend != nil { + frontendv1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend) + } + level.Debug(util.Logger).Log("msg", "initializing query range tripperware", "config", fmt.Sprintf("%+v", t.cfg.QueryRange), "limits", fmt.Sprintf("%+v", t.cfg.LimitsConfig), @@ -367,16 +394,20 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { return } t.stopper = stopper - t.frontend.Wrap(tripperware) - frontend.RegisterFrontendServer(t.Server.GRPC, t.frontend) - frontendHandler := middleware.Merge( + roundTripper = tripperware(roundTripper) + frontendHandler := transport.NewHandler(t.cfg.Frontend.Handler, roundTripper, util.Logger, prometheus.DefaultRegisterer) + if t.cfg.Frontend.CompressResponses { + frontendHandler = gziphandler.GzipHandler(frontendHandler) + } + + frontendHandler = middleware.Merge( serverutil.RecoveryHTTPMiddleware, t.httpAuthMiddleware, queryrange.StatsHTTPMiddleware, serverutil.NewPrepopulateMiddleware(), serverutil.ResponseJSONMiddleware(), - ).Wrap(t.frontend.Handler()) + ).Wrap(frontendHandler) var defaultHandler http.Handler if t.cfg.Frontend.TailProxyURL != "" { diff --git a/pkg/lokifrontend/config.go b/pkg/lokifrontend/config.go index 800c517e4c7ff..31736db11293b 100644 --- a/pkg/lokifrontend/config.go +++ b/pkg/lokifrontend/config.go @@ -3,16 +3,27 @@ package lokifrontend import ( "flag" - "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/frontend/transport" + v1 "github.com/cortexproject/cortex/pkg/frontend/v1" ) type Config struct { - frontend.Config `yaml:",inline"` - TailProxyURL string `yaml:"tail_proxy_url"` + Handler transport.HandlerConfig `yaml:",inline"` + FrontendV1 v1.Config `yaml:",inline"` + + CompressResponses bool `yaml:"compress_responses"` + DownstreamURL string `yaml:"downstream_url"` + + TailProxyURL string `yaml:"tail_proxy_url"` } // RegisterFlags adds the flags required to config this to the given FlagSet. 
func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.Config.RegisterFlags(f) + cfg.Handler.RegisterFlags(f) + cfg.FrontendV1.RegisterFlags(f) + + f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.") + f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") + f.StringVar(&cfg.TailProxyURL, "frontend.tail-proxy-url", "", "URL of querier for tail proxy.") } diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go index 2ebf8a29c0c06..88735c36955b4 100644 --- a/pkg/querier/ingester_querier.go +++ b/pkg/querier/ingester_querier.go @@ -63,7 +63,7 @@ func newIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryD // forAllIngesters runs f, in parallel, for all ingesters // TODO taken from Cortex, see if we can refactor out an usable interface. func (q *IngesterQuerier) forAllIngesters(ctx context.Context, f func(logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) { - replicationSet, err := q.ring.GetAll(ring.Read) + replicationSet, err := q.ring.GetReplicationSetForOperation(ring.Read) if err != nil { return nil, err } @@ -169,7 +169,7 @@ func (q *IngesterQuerier) TailDisconnectedIngesters(ctx context.Context, req *lo } // Get the current replication set from the ring - replicationSet, err := q.ring.GetAll(ring.Read) + replicationSet, err := q.ring.GetReplicationSetForOperation(ring.Read) if err != nil { return nil, err } @@ -226,7 +226,7 @@ func (q *IngesterQuerier) Series(ctx context.Context, req *logproto.SeriesReques } func (q *IngesterQuerier) TailersCount(ctx context.Context) ([]uint32, error) { - replicationSet, err := q.ring.GetAll(ring.Read) + replicationSet, err := q.ring.GetAllHealthy(ring.Read) if err != nil { return nil, err } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 4439743f70a8c..e5f5e54255e31 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -319,7 +319,11 @@ func (r *readRingMock) BatchGet(keys []uint32, op ring.Operation) ([]ring.Replic return []ring.ReplicationSet{r.replicationSet}, nil } -func (r *readRingMock) GetAll(op ring.Operation) (ring.ReplicationSet, error) { +func (r *readRingMock) GetAllHealthy(op ring.Operation) (ring.ReplicationSet, error) { + return r.replicationSet, nil +} + +func (r *readRingMock) GetReplicationSetForOperation(op ring.Operation) (ring.ReplicationSet, error) { return r.replicationSet, nil } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 5cd4e366e2a97..840103cf98f30 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -9,7 +9,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/querier/queryrange" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -45,7 +44,7 @@ func NewTripperware( schema chunk.SchemaConfig, minShardingLookback time.Duration, registerer prometheus.Registerer, -) (frontend.Tripperware, Stopper, error) { +) (queryrange.Tripperware, Stopper, error) { // Ensure that QuerySplitDuration uses configuration defaults. // This avoids divide by zero errors when determining cache keys where user specific overrides don't exist. 
limits = WithDefaultLimits(limits, cfg.Config) @@ -222,8 +221,8 @@ func NewLogFilterTripperware( retryMiddlewareMetrics *queryrange.RetryMiddlewareMetrics, shardingMetrics *logql.ShardingMetrics, splitByMetrics *SplitByMetrics, -) (frontend.Tripperware, error) { - queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.LimitsMiddleware(limits)} +) (queryrange.Tripperware, error) { + queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.NewLimitsMiddleware(limits)} if cfg.SplitQueriesByInterval != 0 { queryRangeMiddleware = append(queryRangeMiddleware, queryrange.InstrumentMiddleware("split_by_interval", instrumentMetrics), SplitByIntervalMiddleware(limits, codec, splitByMetrics)) } @@ -265,7 +264,7 @@ func NewSeriesTripperware( instrumentMetrics *queryrange.InstrumentMiddlewareMetrics, retryMiddlewareMetrics *queryrange.RetryMiddlewareMetrics, splitByMetrics *SplitByMetrics, -) (frontend.Tripperware, error) { +) (queryrange.Tripperware, error) { queryRangeMiddleware := []queryrange.Middleware{} if cfg.SplitQueriesByInterval != 0 { queryRangeMiddleware = append(queryRangeMiddleware, @@ -295,7 +294,7 @@ func NewLabelsTripperware( instrumentMetrics *queryrange.InstrumentMiddlewareMetrics, retryMiddlewareMetrics *queryrange.RetryMiddlewareMetrics, splitByMetrics *SplitByMetrics, -) (frontend.Tripperware, error) { +) (queryrange.Tripperware, error) { queryRangeMiddleware := []queryrange.Middleware{} if cfg.SplitQueriesByInterval != 0 { queryRangeMiddleware = append(queryRangeMiddleware, @@ -331,8 +330,8 @@ func NewMetricTripperware( shardingMetrics *logql.ShardingMetrics, splitByMetrics *SplitByMetrics, registerer prometheus.Registerer, -) (frontend.Tripperware, Stopper, error) { - queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.LimitsMiddleware(limits)} +) (queryrange.Tripperware, Stopper, error) { + queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.NewLimitsMiddleware(limits)} if cfg.AlignQueriesWithStep { queryRangeMiddleware = append( queryRangeMiddleware, @@ -406,7 +405,7 @@ func NewMetricTripperware( // Finally, if the user selected any query range middleware, stitch it in. if len(queryRangeMiddleware) > 0 { rt := queryrange.NewRoundTripper(next, codec, queryRangeMiddleware...) 
- return frontend.RoundTripFunc(func(r *http.Request) (*http.Response, error) { + return queryrange.RoundTripFunc(func(r *http.Request) (*http.Response, error) { if !strings.HasSuffix(r.URL.Path, "/query_range") { return next.RoundTrip(r) } diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index d308964cc0fe0..aaaafc302adea 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -15,7 +15,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/querier/queryrange" "github.com/cortexproject/cortex/pkg/util" "github.com/prometheus/prometheus/pkg/labels" @@ -405,22 +404,22 @@ func TestPostQueries(t *testing.T) { req = req.WithContext(user.InjectOrgID(context.Background(), "1")) require.NoError(t, err) _, err = newRoundTripper( - frontend.RoundTripFunc(func(*http.Request) (*http.Response, error) { + queryrange.RoundTripFunc(func(*http.Request) (*http.Response, error) { t.Error("unexpected default roundtripper called") return nil, nil }), - frontend.RoundTripFunc(func(*http.Request) (*http.Response, error) { + queryrange.RoundTripFunc(func(*http.Request) (*http.Response, error) { return nil, nil }), - frontend.RoundTripFunc(func(*http.Request) (*http.Response, error) { + queryrange.RoundTripFunc(func(*http.Request) (*http.Response, error) { t.Error("unexpected metric roundtripper called") return nil, nil }), - frontend.RoundTripFunc(func(*http.Request) (*http.Response, error) { + queryrange.RoundTripFunc(func(*http.Request) (*http.Response, error) { t.Error("unexpected series roundtripper called") return nil, nil }), - frontend.RoundTripFunc(func(*http.Request) (*http.Response, error) { + queryrange.RoundTripFunc(func(*http.Request) (*http.Response, error) { t.Error("unexpected labels roundtripper called") return nil, nil }), @@ -528,6 +527,10 @@ func (f fakeLimits) MaxCacheFreshness(string) time.Duration { return 1 * time.Minute } +func (f fakeLimits) MaxQueryLookback(string) time.Duration { + return 0 +} + func counter() (*int, http.Handler) { count := 0 var lock sync.Mutex diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index d381aaaaead04..6228d5975700c 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -40,6 +40,7 @@ type Limits struct { // Querier enforced limits. MaxChunksPerQuery int `yaml:"max_chunks_per_query"` MaxQuerySeries int `yaml:"max_query_series"` + MaxQueryLookback time.Duration `yaml:"max_query_lookback"` MaxQueryLength time.Duration `yaml:"max_query_length"` MaxQueryParallelism int `yaml:"max_query_parallelism"` CardinalityLimit int `yaml:"cardinality_limit"` @@ -77,6 +78,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.") f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit to length of chunk store queries, 0 to disable.") f.IntVar(&l.MaxQuerySeries, "querier.max-query-series", 500, "Limit the maximum of unique series returned by a metric query. When the limit is reached an error is returned.") + f.DurationVar(&l.MaxQueryLookback, "querier.max-query-lookback", 0, "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. 
If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.") f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.") f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.") f.IntVar(&l.MaxStreamsMatchersPerQuery, "querier.max-streams-matcher-per-query", 1000, "Limit the number of streams matchers per query") @@ -256,6 +258,11 @@ func (o *Overrides) MaxCacheFreshness(userID string) time.Duration { return o.getOverridesForUser(userID).MaxCacheFreshness } +// MaxQueryLookback returns the max lookback period of queries. +func (o *Overrides) MaxQueryLookback(userID string) time.Duration { + return o.getOverridesForUser(userID).MaxQueryLookback +} + func (o *Overrides) getOverridesForUser(userID string) *Limits { if o.tenantLimits != nil { l := o.tenantLimits(userID) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 26be936b7e5f2..a0b969dffa450 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -171,6 +171,11 @@ type Resource struct { ResourceName string } +// String function returns a string in form of azureResourceID +func (r Resource) String() string { + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName) +} + // ParseResourceID parses a resource ID into a ResourceDetails struct. // See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. func ParseResourceID(resourceID string) (Resource, error) { diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml index 17c4d0a710716..bb83c6670df63 100644 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ b/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -1,18 +1,12 @@ -dist: bionic language: go -env: GO111MODULE=on GOFLAGS='-mod vendor' -install: true -email: false - +dist: xenial go: - - 1.10 - - 1.11 - - 1.12 - - 1.13 - - tip + - '1.10' + - '1.11' + - '1.12' + - '1.13' + - 'tip' -before_script: - - go install github.com/golangci/golangci-lint/cmd/golangci-lint script: - - golangci-lint run # run a bunch of code checkers/linters in parallel - - go test -v -race ./... # Run all the tests with the race detector enabled + - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000..4b462b0d81b17 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +This project adheres to [The Code Manifesto](http://codemanifesto.com) +as its guidelines for contributor interactions. + +## The Code Manifesto + +We want to work in an ecosystem that empowers developers to reach their +potential — one that encourages growth and effective collaboration. A space +that is safe for all. + +A space such as this benefits everyone that participates in it. It encourages +new developers to enter our field. 
It is through discussion and collaboration +that we grow, and through growth that we improve. + +In the effort to create such a place, we hold to these values: + +1. **Discrimination limits us.** This includes discrimination on the basis of + race, gender, sexual orientation, gender identity, age, nationality, + technology and any other arbitrary exclusion of a group of people. +2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort + levels. Remember that, and if brought to your attention, heed it. +3. **We are our biggest assets.** None of us were born masters of our trade. + Each of us has been helped along the way. Return that favor, when and where + you can. +4. **We are resources for the future.** As an extension of #3, share what you + know. Make yourself a resource to help those that come after you. +5. **Respect defines us.** Treat others as you wish to be treated. Make your + discussions, criticisms and debates from a position of respectfulness. Ask + yourself, is it true? Is it necessary? Is it constructive? Anything less is + unacceptable. +6. **Reactions require grace.** Angry responses are valid, but abusive language + and vindictive actions are toxic. When something happens that offends you, + handle it assertively, but be respectful. Escalate reasonably, and try to + allow the offender an opportunity to explain themselves, and possibly + correct the issue. +7. **Opinions are just that: opinions.** Each and every one of us, due to our + background and upbringing, have varying opinions. That is perfectly + acceptable. Remember this: if you respect your own opinions, you should + respect the opinions of others. +8. **To err is human.** You might not intend it, but mistakes do happen and + contribute to build experience. Tolerate honest mistakes, and don't + hesitate to apologize if you make one yourself. 
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE index 2f9a31fadf678..cacba91024000 100644 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ b/vendor/github.com/asaskevich/govalidator/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Alex Saskevich +Copyright (c) 2014-2020 Alex Saskevich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md index bfe6e35b182ad..39121ea8e37c2 100644 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -1,7 +1,8 @@ govalidator =========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) +[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) A 
package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). @@ -83,14 +84,14 @@ This was changed to prevent data races when accessing custom validators. import "github.com/asaskevich/govalidator" // before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool { +govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { // ... -}) +} // after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { // ... -})) +}) ``` #### List of functions: @@ -238,7 +239,7 @@ func Trim(str, chars string) string func Truncate(str string, length int, ending string) string func TruncatingErrorf(str string, args ...interface{}) error func UnderscoreToCamelCase(s string) string -func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) +func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) func ValidateStruct(s interface{}) (bool, error) func WhiteList(str, chars string) string type ConditionIterator @@ -279,7 +280,7 @@ type User struct { Age int `valid:"type(int)"` Meta interface{} `valid:"type(string)"` } -result, err := govalidator.ValidateStruct(user{"Bob", 20, "meta"}) +result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) if err != nil { println("error: " + err.Error()) } @@ -392,6 +393,8 @@ Validators with parameters "matches(pattern)": StringMatches, "in(string1|string2|...|stringN)": IsIn, "rsapub(keylength)" : IsRsaPub, +"minstringlength(int): MinStringLength, +"maxstringlength(int): MaxStringLength, ``` Validators with parameters for any type @@ -461,7 +464,7 @@ var inputMap = map[string]interface{}{ }, } -result, err := govalidator.ValidateMap(mapTemplate, inputMap) +result, err := govalidator.ValidateMap(inputMap, mapTemplate) if err != nil { println("error: " + err.Error()) } @@ -487,7 +490,7 @@ type StructWithCustomByteArray struct { CustomMinLength int `valid:"-"` } -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { switch v := context.(type) { // you can type switch on the context interface being validated case StructWithCustomByteArray: // you can check and validate against some other field in the context, @@ -507,14 +510,25 @@ govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator } } return false -})) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool { +}) +govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation case StructWithCustomByteArray: return len(v.ID) >= v.CustomMinLength } return false -})) +}) +``` + +###### Loop over Error() +By default .Error() returns all errors in a single String. 
To access each error you can do this: +```go + if err != nil { + errs := err.(govalidator.Errors).Errors() + for _, e := range errs { + fmt.Println(e.Error()) + } + } ``` ###### Custom error messages @@ -602,4 +616,4 @@ Support this project by becoming a sponsor. Your logo will show up here with a l ## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) \ No newline at end of file +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go index 5bace2654d3bc..3e1da7cb480eb 100644 --- a/vendor/github.com/asaskevich/govalidator/arrays.go +++ b/vendor/github.com/asaskevich/govalidator/arrays.go @@ -9,6 +9,35 @@ type ResultIterator func(interface{}, int) interface{} // ConditionIterator is the function that accepts element of slice/array and its index and returns boolean type ConditionIterator func(interface{}, int) bool +// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values +type ReduceIterator func(interface{}, interface{}) interface{} + +// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. +func Some(array []interface{}, iterator ConditionIterator) bool { + res := false + for index, data := range array { + res = res || iterator(data, index) + } + return res +} + +// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. +func Every(array []interface{}, iterator ConditionIterator) bool { + res := true + for index, data := range array { + res = res && iterator(data, index) + } + return res +} + +// Reduce boils down a list of values into a single value by ReduceIterator +func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { + for _, data := range array { + initialValue = iterator(initialValue, data) + } + return initialValue +} + // Each iterates over the slice and apply Iterator to every item func Each(array []interface{}, iterator Iterator) { for index, data := range array { diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go index cf1e5d569ba01..d68e990fc256f 100644 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ b/vendor/github.com/asaskevich/govalidator/converter.go @@ -10,7 +10,7 @@ import ( // ToString convert the input to a string. func ToString(obj interface{}) string { res := fmt.Sprintf("%v", obj) - return string(res) + return res } // ToJSON convert the input to a valid JSON string @@ -23,12 +23,27 @@ func ToJSON(obj interface{}) (string, error) { } // ToFloat convert the input string to a float, or 0.0 if the input is not a float. 
-func ToFloat(str string) (float64, error) { - res, err := strconv.ParseFloat(str, 64) - if err != nil { - res = 0.0 +func ToFloat(value interface{}) (res float64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = float64(val.Int()) + case uint, uint8, uint16, uint32, uint64: + res = float64(val.Uint()) + case float32, float64: + res = val.Float() + case string: + res, err = strconv.ParseFloat(val.String(), 64) + if err != nil { + res = 0 + } + default: + err = fmt.Errorf("ToInt: unknown interface type %T", value) + res = 0 } - return res, err + + return } // ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. @@ -40,6 +55,8 @@ func ToInt(value interface{}) (res int64, err error) { res = val.Int() case uint, uint8, uint16, uint32, uint64: res = int64(val.Uint()) + case float32, float64: + res = int64(val.Float()) case string: if IsInt(val.String()) { res, err = strconv.ParseInt(val.String(), 0, 64) @@ -47,11 +64,11 @@ func ToInt(value interface{}) (res int64, err error) { res = 0 } } else { - err = fmt.Errorf("math: square root of negative number %g", value) + err = fmt.Errorf("ToInt: invalid numeric format %g", value) res = 0 } default: - err = fmt.Errorf("math: square root of negative number %g", value) + err = fmt.Errorf("ToInt: unknown interface type %T", value) res = 0 } diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go index 655b750cb8f6e..1da2336f47ee2 100644 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ b/vendor/github.com/asaskevich/govalidator/error.go @@ -1,6 +1,9 @@ package govalidator -import "strings" +import ( + "sort" + "strings" +) // Errors is an array of multiple errors and conforms to the error interface. 
type Errors []error @@ -15,6 +18,7 @@ func (es Errors) Error() string { for _, e := range es { errs = append(errs, e.Error()) } + sort.Strings(errs) return strings.Join(errs, ";") } diff --git a/vendor/github.com/asaskevich/govalidator/go.mod b/vendor/github.com/asaskevich/govalidator/go.mod index c1ce891dfa031..42d5b1f638577 100644 --- a/vendor/github.com/asaskevich/govalidator/go.mod +++ b/vendor/github.com/asaskevich/govalidator/go.mod @@ -1,3 +1,3 @@ module github.com/asaskevich/govalidator -go 1.12 +go 1.13 diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go index 7e6c652e140c6..5041d9e86844c 100644 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ b/vendor/github.com/asaskevich/govalidator/numerics.go @@ -2,7 +2,6 @@ package govalidator import ( "math" - "reflect" ) // Abs returns absolute value of number @@ -41,7 +40,7 @@ func IsNonPositive(value float64) bool { return value <= 0 } -// InRange returns true if value lies between left and right border +// InRangeInt returns true if value lies between left and right border func InRangeInt(value, left, right interface{}) bool { value64, _ := ToInt(value) left64, _ := ToInt(left) @@ -52,7 +51,7 @@ func InRangeInt(value, left, right interface{}) bool { return value64 >= left64 && value64 <= right64 } -// InRange returns true if value lies between left and right border +// InRangeFloat32 returns true if value lies between left and right border func InRangeFloat32(value, left, right float32) bool { if left > right { left, right = right, left @@ -60,7 +59,7 @@ func InRangeFloat32(value, left, right float32) bool { return value >= left && value <= right } -// InRange returns true if value lies between left and right border +// InRangeFloat64 returns true if value lies between left and right border func InRangeFloat64(value, left, right float64) bool { if left > right { left, right = right, left @@ -68,20 +67,24 @@ func InRangeFloat64(value, left, right float64) bool { return value >= left && value <= right } -// InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type +// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. +// All types must the same type. 
+// False if value doesn't lie in range or if it incompatible or not comparable func InRange(value interface{}, left interface{}, right interface{}) bool { - - reflectValue := reflect.TypeOf(value).Kind() - reflectLeft := reflect.TypeOf(left).Kind() - reflectRight := reflect.TypeOf(right).Kind() - - if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int { - return InRangeInt(value.(int), left.(int), right.(int)) - } else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 { - return InRangeFloat32(value.(float32), left.(float32), right.(float32)) - } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 { - return InRangeFloat64(value.(float64), left.(float64), right.(float64)) - } else { + switch value.(type) { + case int: + intValue, _ := ToInt(value) + intLeft, _ := ToInt(left) + intRight, _ := ToInt(right) + return InRangeInt(intValue, intLeft, intRight) + case float32, float64: + intValue, _ := ToFloat(value) + intLeft, _ := ToFloat(left) + intRight, _ := ToFloat(right) + return InRangeFloat64(intValue, intLeft, intRight) + case string: + return value.(string) >= left.(string) && value.(string) <= right.(string) + default: return false } } diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go index 1cf972683e9e6..106ed94f80ad0 100644 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -38,7 +38,7 @@ const ( URLPort string = `(:(\d{1,5}))` URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` UnixPath string = `^(/[^/\x00]*)+/?$` @@ -48,6 +48,8 @@ const ( hasUpperCase string = ".*[[:upper:]]" hasWhitespace string = ".*[[:space:]]" hasWhitespaceOnly string = "^[[:space:]]+$" + IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" + IMSI string = "^\\d{14,15}$" ) // Used by IsFilePath func @@ -100,4 +102,6 @@ var ( rxHasUpperCase = regexp.MustCompile(hasUpperCase) rxHasWhitespace = regexp.MustCompile(hasWhitespace) rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) + rxIMEI = regexp.MustCompile(IMEI) + rxIMSI = regexp.MustCompile(IMSI) ) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go index f42a346c172ba..54218bf05a2fd 100644 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -14,8 +14,10 @@ type Validator func(str string) bool // The second parameter 
should be the context (in the case of validating a struct: the whole object being validated). type CustomTypeValidator func(i interface{}, o interface{}) bool -// ParamValidator is a wrapper for validator functions that accepts additional parameters. +// ParamValidator is a wrapper for validator functions that accept additional parameters. type ParamValidator func(str string, params ...string) bool + +// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value type InterfaceParamValidator func(in interface{}, params ...string) bool type tagOptionsMap map[string]tagOption @@ -72,13 +74,13 @@ var ParamTagMap = map[string]ParamValidator{ // ParamTagRegexMap maps param tags to their respective regexes. var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), + "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), + "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), + "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), + "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), + "in": regexp.MustCompile(`^in\((.*)\)`), + "matches": regexp.MustCompile(`^matches\((.+)\)$`), + "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), } @@ -162,6 +164,7 @@ var TagMap = map[string]Validator{ "ISO3166Alpha2": IsISO3166Alpha2, "ISO3166Alpha3": IsISO3166Alpha3, "ISO4217": IsISO4217, + "IMEI": IsIMEI, } // ISO3166Entry stores country codes @@ -446,10 +449,10 @@ var ISO4217List = []string{ "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", "QAR", "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS", - "VEF", "VND", "VUV", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", + "VEF", "VES", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", "YER", diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index 14682e01e2094..5c918fc4bc7de 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -32,7 +32,7 @@ var ( const maxURLRuneCount = 2083 const minURLRuneCount = 3 -const RF3339WithoutZone = "2006-01-02T15:04:05" +const rfc3339WithoutZone = "2006-01-02T15:04:05" // SetFieldsRequiredByDefault causes validation to fail when struct fields // do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). @@ -63,13 +63,13 @@ func SetNilPtrAllowedByRequired(value bool) { nilPtrAllowedByRequired = value } -// IsEmail check if the string is an email. 
+// IsEmail checks if the string is an email. func IsEmail(str string) bool { // TODO uppercase letters are not supported return rxEmail.MatchString(str) } -// IsExistingEmail check if the string is an email of existing domain +// IsExistingEmail checks if the string is an email of existing domain func IsExistingEmail(email string) bool { if len(email) < 6 || len(email) > 254 { @@ -84,13 +84,13 @@ func IsExistingEmail(email string) bool { if len(user) > 64 { return false } - if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { - return false - } switch host { case "localhost", "example.com": return true } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } if _, err := net.LookupMX(host); err != nil { if _, err := net.LookupIP(host); err != nil { return false @@ -100,7 +100,7 @@ func IsExistingEmail(email string) bool { return true } -// IsURL check if the string is an URL. +// IsURL checks if the string is an URL. func IsURL(str string) bool { if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { return false @@ -124,7 +124,7 @@ func IsURL(str string) bool { return rxURL.MatchString(str) } -// IsRequestURL check if the string rawurl, assuming +// IsRequestURL checks if the string rawurl, assuming // it was received in an HTTP request, is a valid // URL confirm to RFC 3986 func IsRequestURL(rawurl string) bool { @@ -138,7 +138,7 @@ func IsRequestURL(rawurl string) bool { return true } -// IsRequestURI check if the string rawurl, assuming +// IsRequestURI checks if the string rawurl, assuming // it was received in an HTTP request, is an // absolute URI or an absolute path. func IsRequestURI(rawurl string) bool { @@ -146,7 +146,7 @@ func IsRequestURI(rawurl string) bool { return err == nil } -// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid. +// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid. func IsAlpha(str string) bool { if IsNull(str) { return true @@ -154,7 +154,7 @@ func IsAlpha(str string) bool { return rxAlpha.MatchString(str) } -//IsUTFLetter check if the string contains only unicode letter characters. +//IsUTFLetter checks if the string contains only unicode letter characters. //Similar to IsAlpha but for all languages. Empty string is valid. func IsUTFLetter(str string) bool { if IsNull(str) { @@ -170,7 +170,7 @@ func IsUTFLetter(str string) bool { } -// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid. +// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid. func IsAlphanumeric(str string) bool { if IsNull(str) { return true @@ -178,7 +178,7 @@ func IsAlphanumeric(str string) bool { return rxAlphanumeric.MatchString(str) } -// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid. +// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid. func IsUTFLetterNumeric(str string) bool { if IsNull(str) { return true @@ -192,7 +192,7 @@ func IsUTFLetterNumeric(str string) bool { } -// IsNumeric check if the string contains only numbers. Empty string is valid. +// IsNumeric checks if the string contains only numbers. Empty string is valid. 
func IsNumeric(str string) bool { if IsNull(str) { return true @@ -200,7 +200,7 @@ func IsNumeric(str string) bool { return rxNumeric.MatchString(str) } -// IsUTFNumeric check if the string contains only unicode numbers of any kind. +// IsUTFNumeric checks if the string contains only unicode numbers of any kind. // Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. func IsUTFNumeric(str string) bool { if IsNull(str) { @@ -222,7 +222,7 @@ func IsUTFNumeric(str string) bool { } -// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid. +// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid. func IsUTFDigit(str string) bool { if IsNull(str) { return true @@ -243,22 +243,22 @@ func IsUTFDigit(str string) bool { } -// IsHexadecimal check if the string is a hexadecimal number. +// IsHexadecimal checks if the string is a hexadecimal number. func IsHexadecimal(str string) bool { return rxHexadecimal.MatchString(str) } -// IsHexcolor check if the string is a hexadecimal color. +// IsHexcolor checks if the string is a hexadecimal color. func IsHexcolor(str string) bool { return rxHexcolor.MatchString(str) } -// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB). func IsRGBcolor(str string) bool { return rxRGBcolor.MatchString(str) } -// IsLowerCase check if the string is lowercase. Empty string is valid. +// IsLowerCase checks if the string is lowercase. Empty string is valid. func IsLowerCase(str string) bool { if IsNull(str) { return true @@ -266,7 +266,7 @@ func IsLowerCase(str string) bool { return str == strings.ToLower(str) } -// IsUpperCase check if the string is uppercase. Empty string is valid. +// IsUpperCase checks if the string is uppercase. Empty string is valid. func IsUpperCase(str string) bool { if IsNull(str) { return true @@ -274,7 +274,7 @@ func IsUpperCase(str string) bool { return str == strings.ToUpper(str) } -// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. +// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid. func HasLowerCase(str string) bool { if IsNull(str) { return true @@ -282,7 +282,7 @@ func HasLowerCase(str string) bool { return rxHasLowerCase.MatchString(str) } -// HasUpperCase check if the string contians as least 1 uppercase. Empty string is valid. +// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid. func HasUpperCase(str string) bool { if IsNull(str) { return true @@ -290,7 +290,7 @@ func HasUpperCase(str string) bool { return rxHasUpperCase.MatchString(str) } -// IsInt check if the string is an integer. Empty string is valid. +// IsInt checks if the string is an integer. Empty string is valid. func IsInt(str string) bool { if IsNull(str) { return true @@ -298,12 +298,12 @@ func IsInt(str string) bool { return rxInt.MatchString(str) } -// IsFloat check if the string is a float. +// IsFloat checks if the string is a float. func IsFloat(str string) bool { return str != "" && rxFloat.MatchString(str) } -// IsDivisibleBy check if the string is a number that's divisible by another. +// IsDivisibleBy checks if the string is a number that's divisible by another. // If second argument is not valid integer or zero, it's return false. 
// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). func IsDivisibleBy(str, num string) bool { @@ -316,12 +316,12 @@ func IsDivisibleBy(str, num string) bool { return (p == 0) || (p%q == 0) } -// IsNull check if the string is null. +// IsNull checks if the string is null. func IsNull(str string) bool { return len(str) == 0 } -// IsNotNull check if the string is not null. +// IsNotNull checks if the string is not null. func IsNotNull(str string) bool { return !IsNull(str) } @@ -336,32 +336,32 @@ func HasWhitespace(str string) bool { return len(str) > 0 && rxHasWhitespace.MatchString(str) } -// IsByteLength check if the string's length (in bytes) falls in a range. +// IsByteLength checks if the string's length (in bytes) falls in a range. func IsByteLength(str string, min, max int) bool { return len(str) >= min && len(str) <= max } -// IsUUIDv3 check if the string is a UUID version 3. +// IsUUIDv3 checks if the string is a UUID version 3. func IsUUIDv3(str string) bool { return rxUUID3.MatchString(str) } -// IsUUIDv4 check if the string is a UUID version 4. +// IsUUIDv4 checks if the string is a UUID version 4. func IsUUIDv4(str string) bool { return rxUUID4.MatchString(str) } -// IsUUIDv5 check if the string is a UUID version 5. +// IsUUIDv5 checks if the string is a UUID version 5. func IsUUIDv5(str string) bool { return rxUUID5.MatchString(str) } -// IsUUID check if the string is a UUID (version 3, 4 or 5). +// IsUUID checks if the string is a UUID (version 3, 4 or 5). func IsUUID(str string) bool { return rxUUID.MatchString(str) } -// IsCreditCard check if the string is a credit card. +// IsCreditCard checks if the string is a credit card. func IsCreditCard(str string) bool { sanitized := notNumberRegexp.ReplaceAllString(str, "") if !rxCreditCard.MatchString(sanitized) { @@ -377,7 +377,7 @@ func IsCreditCard(str string) bool { if shouldDouble { tmpNum *= 2 if tmpNum >= 10 { - sum += ((tmpNum % 10) + 1) + sum += (tmpNum % 10) + 1 } else { sum += tmpNum } @@ -390,18 +390,18 @@ func IsCreditCard(str string) bool { return sum%10 == 0 } -// IsISBN10 check if the string is an ISBN version 10. +// IsISBN10 checks if the string is an ISBN version 10. func IsISBN10(str string) bool { return IsISBN(str, 10) } -// IsISBN13 check if the string is an ISBN version 13. +// IsISBN13 checks if the string is an ISBN version 13. func IsISBN13(str string) bool { return IsISBN(str, 13) } -// IsISBN check if the string is an ISBN (version 10 or 13). -// If version value is not equal to 10 or 13, it will be check both variants. +// IsISBN checks if the string is an ISBN (version 10 or 13). +// If version value is not equal to 10 or 13, it will be checks both variants. func IsISBN(str string, version int) bool { sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") var checksum int32 @@ -435,13 +435,13 @@ func IsISBN(str string, version int) bool { return IsISBN(str, 10) || IsISBN(str, 13) } -// IsJSON check if the string is valid JSON (note: uses json.Unmarshal). +// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal). func IsJSON(str string) bool { var js json.RawMessage return json.Unmarshal([]byte(str), &js) == nil } -// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid. +// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid. 
func IsMultibyte(str string) bool { if IsNull(str) { return true @@ -449,7 +449,7 @@ func IsMultibyte(str string) bool { return rxMultibyte.MatchString(str) } -// IsASCII check if the string contains ASCII chars only. Empty string is valid. +// IsASCII checks if the string contains ASCII chars only. Empty string is valid. func IsASCII(str string) bool { if IsNull(str) { return true @@ -457,7 +457,7 @@ func IsASCII(str string) bool { return rxASCII.MatchString(str) } -// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid. +// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid. func IsPrintableASCII(str string) bool { if IsNull(str) { return true @@ -465,7 +465,7 @@ func IsPrintableASCII(str string) bool { return rxPrintableASCII.MatchString(str) } -// IsFullWidth check if the string contains any full-width chars. Empty string is valid. +// IsFullWidth checks if the string contains any full-width chars. Empty string is valid. func IsFullWidth(str string) bool { if IsNull(str) { return true @@ -473,7 +473,7 @@ func IsFullWidth(str string) bool { return rxFullWidth.MatchString(str) } -// IsHalfWidth check if the string contains any half-width chars. Empty string is valid. +// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid. func IsHalfWidth(str string) bool { if IsNull(str) { return true @@ -481,7 +481,7 @@ func IsHalfWidth(str string) bool { return rxHalfWidth.MatchString(str) } -// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid. +// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid. func IsVariableWidth(str string) bool { if IsNull(str) { return true @@ -489,12 +489,12 @@ func IsVariableWidth(str string) bool { return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) } -// IsBase64 check if a string is base64 encoded. +// IsBase64 checks if a string is base64 encoded. func IsBase64(str string) bool { return rxBase64.MatchString(str) } -// IsFilePath check is a string is Win or Unix file path and returns it's type. +// IsFilePath checks is a string is Win or Unix file path and returns it's type. func IsFilePath(str string) (bool, int) { if rxWinPath.MatchString(str) { //check windows path limit see: @@ -575,7 +575,7 @@ func IsDNSName(str string) bool { // IsHash checks if a string is a hash of type algorithm. // Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] func IsHash(str string, algorithm string) bool { - len := "0" + var len string algo := strings.ToLower(algorithm) if algo == "crc32" || algo == "crc32b" { @@ -686,25 +686,25 @@ func IsPort(str string) bool { return false } -// IsIPv4 check if the string is an IP version 4. +// IsIPv4 checks if the string is an IP version 4. func IsIPv4(str string) bool { ip := net.ParseIP(str) return ip != nil && strings.Contains(str, ".") } -// IsIPv6 check if the string is an IP version 6. +// IsIPv6 checks if the string is an IP version 6. 
func IsIPv6(str string) bool { ip := net.ParseIP(str) return ip != nil && strings.Contains(str, ":") } -// IsCIDR check if the string is an valid CIDR notiation (IPV4 & IPV6) +// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) func IsCIDR(str string) bool { _, _, err := net.ParseCIDR(str) return err == nil } -// IsMAC check if a string is valid MAC address. +// IsMAC checks if a string is valid MAC address. // Possible MAC formats: // 01:23:45:67:89:ab // 01:23:45:67:89:ab:cd:ef @@ -722,22 +722,70 @@ func IsHost(str string) bool { return IsIP(str) || IsDNSName(str) } -// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId. +// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId. func IsMongoID(str string) bool { return rxHexadecimal.MatchString(str) && (len(str) == 24) } -// IsLatitude check if a string is valid latitude. +// IsLatitude checks if a string is valid latitude. func IsLatitude(str string) bool { return rxLatitude.MatchString(str) } -// IsLongitude check if a string is valid longitude. +// IsLongitude checks if a string is valid longitude. func IsLongitude(str string) bool { return rxLongitude.MatchString(str) } -// IsRsaPublicKey check if a string is valid public key with provided length +// IsIMEI checks if a string is valid IMEI +func IsIMEI(str string) bool { + return rxIMEI.MatchString(str) +} + +// IsIMSI checks if a string is valid IMSI +func IsIMSI(str string) bool { + if !rxIMSI.MatchString(str) { + return false + } + + mcc, err := strconv.ParseInt(str[0:3], 10, 32) + if err != nil { + return false + } + + switch mcc { + case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219: + case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235: + case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257: + case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278: + case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293: + case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314: + case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346: + case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364: + case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402: + case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417: + case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428: + case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441: + case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467: + case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528: + case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545: + case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555: + case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611: + case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621: + case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631: + case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641: + case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652: + case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708: + case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736: + case 738, 740, 742, 744, 746, 748, 750, 995: + return true + default: + return false + } + return true +} + +// IsRsaPublicKey checks if a string is valid public key with provided length func IsRsaPublicKey(str string, keylen int) bool { bb := bytes.NewBufferString(str) pemBytes, err := ioutil.ReadAll(bb) @@ -791,7 +839,7 @@ func toJSONName(tag string) string { return name } -func PrependPathToErrors(err error, path string) error { +func prependPathToErrors(err error, path string) error { switch err2 := err.(type) { case Error: err2.Path = append([]string{path}, 
err2.Path...) @@ -799,17 +847,23 @@ func PrependPathToErrors(err error, path string) error { case Errors: errors := err2.Errors() for i, err3 := range errors { - errors[i] = PrependPathToErrors(err3, path) + errors[i] = prependPathToErrors(err3, path) } return err2 } return err } +// ValidateArray performs validation according to condition iterator that validates every element of the array +func ValidateArray(array []interface{}, iterator ConditionIterator) bool { + return Every(array, iterator) +} + // ValidateMap use validation map for fields. // result will be equal to `false` if there are any errors. -// m is the validation map in the form -// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} +// s is the map containing the data to be validated. +// m is the validation map in the form: +// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { if s == nil { return true, nil @@ -826,7 +880,7 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro presentResult = false var err error err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) - err = PrependPathToErrors(err, key) + err = prependPathToErrors(err, key) errs = append(errs, err) } valueField := reflect.ValueOf(value) @@ -840,13 +894,13 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro if v, ok := value.(map[string]interface{}); !ok { mapResult = false err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) - err = PrependPathToErrors(err, key) + err = prependPathToErrors(err, key) errs = append(errs, err) } else { mapResult, err = ValidateMap(v, subValidator) if err != nil { mapResult = false - err = PrependPathToErrors(err, key) + err = prependPathToErrors(err, key) errs = append(errs, err) } } @@ -857,7 +911,7 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro var err error structResult, err = ValidateStruct(valueField.Interface()) if err != nil { - err = PrependPathToErrors(err, key) + err = prependPathToErrors(err, key) errs = append(errs, err) } } @@ -878,13 +932,13 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro default: typeResult = false err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) - err = PrependPathToErrors(err, key) + err = prependPathToErrors(err, key) errs = append(errs, err) } result = result && presentResult && typeResult && resultField && structResult && mapResult index++ } - // check required keys + // checks required keys requiredResult := true for key, value := range m { if schema, ok := value.(string); ok { @@ -943,7 +997,7 @@ func ValidateStruct(s interface{}) (bool, error) { var err error structResult, err = ValidateStruct(valueField.Interface()) if err != nil { - err = PrependPathToErrors(err, typeField.Name) + err = prependPathToErrors(err, typeField.Name) errs = append(errs, err) } } @@ -980,6 +1034,42 @@ func ValidateStruct(s interface{}) (bool, error) { return result, err } +// ValidateStructAsync performs async validation of the struct and returns results through the channels +func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { + res := make(chan bool) + errors := make(chan error) + + go func() { + defer 
close(res) + defer close(errors) + + isValid, isFailed := ValidateStruct(s) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + +// ValidateMapAsync performs async validation of the map and returns results through the channels +func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { + res := make(chan bool) + errors := make(chan error) + + go func() { + defer close(res) + defer close(errors) + + isValid, isFailed := ValidateMap(s, m) + + res <- isValid + errors <- isFailed + }() + + return res, errors +} + // parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} func parseTagIntoMap(tag string) tagOptionsMap { optionsMap := make(tagOptionsMap) @@ -1028,12 +1118,12 @@ func IsSSN(str string) bool { return rxSSN.MatchString(str) } -// IsSemver check if string is valid semantic version +// IsSemver checks if string is valid semantic version func IsSemver(str string) bool { return rxSemver.MatchString(str) } -// IsType check if interface is of some type +// IsType checks if interface is of some type func IsType(v interface{}, params ...string) bool { if len(params) == 1 { typ := params[0] @@ -1042,13 +1132,13 @@ func IsType(v interface{}, params ...string) bool { return false } -// IsTime check if string is valid according to given format +// IsTime checks if string is valid according to given format func IsTime(str string, format string) bool { _, err := time.Parse(format, str) return err == nil } -// IsUnixTime check if string is valid unix timestamp value +// IsUnixTime checks if string is valid unix timestamp value func IsUnixTime(str string) bool { if _, err := strconv.Atoi(str); err == nil { return true @@ -1056,17 +1146,17 @@ func IsUnixTime(str string) bool { return false } -// IsRFC3339 check if string is valid timestamp value according to RFC3339 +// IsRFC3339 checks if string is valid timestamp value according to RFC3339 func IsRFC3339(str string) bool { return IsTime(str, time.RFC3339) } -// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. +// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. func IsRFC3339WithoutZone(str string) bool { - return IsTime(str, RF3339WithoutZone) + return IsTime(str, rfc3339WithoutZone) } -// IsISO4217 check if string is valid ISO currency code +// IsISO4217 checks if string is valid ISO currency code func IsISO4217(str string) bool { for _, currency := range ISO4217List { if str == currency { @@ -1077,7 +1167,7 @@ func IsISO4217(str string) bool { return false } -// ByteLength check string's length +// ByteLength checks string's length func ByteLength(str string, params ...string) bool { if len(params) == 2 { min, _ := ToInt(params[0]) @@ -1088,13 +1178,13 @@ func ByteLength(str string, params ...string) bool { return false } -// RuneLength check string's length +// RuneLength checks string's length // Alias for StringLength func RuneLength(str string, params ...string) bool { return StringLength(str, params...) 
} -// IsRsaPub check whether string is valid RSA key +// IsRsaPub checks whether string is valid RSA key // Alias for IsRsaPublicKey func IsRsaPub(str string, params ...string) bool { if len(params) == 1 { @@ -1114,7 +1204,7 @@ func StringMatches(s string, params ...string) bool { return false } -// StringLength check string's length (including multi byte strings) +// StringLength checks string's length (including multi byte strings) func StringLength(str string, params ...string) bool { if len(params) == 2 { @@ -1127,7 +1217,7 @@ func StringLength(str string, params ...string) bool { return false } -// MinStringLength check string's minimum length (including multi byte strings) +// MinStringLength checks string's minimum length (including multi byte strings) func MinStringLength(str string, params ...string) bool { if len(params) == 1 { @@ -1139,7 +1229,7 @@ func MinStringLength(str string, params ...string) bool { return false } -// MaxStringLength check string's maximum length (including multi byte strings) +// MaxStringLength checks string's maximum length (including multi byte strings) func MaxStringLength(str string, params ...string) bool { if len(params) == 1 { @@ -1151,7 +1241,7 @@ func MaxStringLength(str string, params ...string) bool { return false } -// Range check string's length +// Range checks string's length func Range(str string, params ...string) bool { if len(params) == 2 { value, _ := ToFloat(str) @@ -1163,6 +1253,7 @@ func Range(str string, params ...string) bool { return false } +// IsInRaw checks if string is in list of allowed values func IsInRaw(str string, params ...string) bool { if len(params) == 1 { rawParams := params[0] @@ -1175,7 +1266,7 @@ func IsInRaw(str string, params ...string) bool { return false } -// IsIn check if string str is a member of the set of strings params +// IsIn checks if string str is a member of the set of strings params func IsIn(str string, params ...string) bool { for _, param := range params { if str == param { @@ -1213,7 +1304,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tag := t.Tag.Get(tagName) - // Check if the field should be ignored + // checks if the field should be ignored switch tag { case "": if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { @@ -1232,8 +1323,8 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options options = parseTagIntoMap(tag) } - if !isFieldSet(v) { - // an empty value is not validated, check only required + if isEmptyValue(v) { + // an empty value is not validated, checks only required isValid, resultErr = checkRequired(v, t, options) for key := range options { delete(options, key) @@ -1286,13 +1377,13 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options validator := validatorSpec customMsgExists := len(validatorStruct.customErrorMessage) > 0 - // Check whether the tag looks like '!something' or 'something' + // checks whether the tag looks like '!something' or 'something' if validator[0] == '!' 
{ validator = validator[1:] negate = true } - // Check for interface param validators + // checks for interface param validators for key, value := range InterfaceParamTagRegexMap { ps := value.FindStringSubmatch(validator) if len(ps) == 0 { @@ -1325,20 +1416,20 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.String: - // for each tag option check the map of validator functions + // for each tag option checks the map of validator functions for _, validatorSpec := range optionsOrder { validatorStruct := options[validatorSpec] var negate bool validator := validatorSpec customMsgExists := len(validatorStruct.customErrorMessage) > 0 - // Check whether the tag looks like '!something' or 'something' + // checks whether the tag looks like '!something' or 'something' if validator[0] == '!' { validator = validator[1:] negate = true } - // Check for param validators + // checks for param validators for key, value := range ParamTagRegexMap { ps := value.FindStringSubmatch(validator) if len(ps) == 0 { @@ -1419,7 +1510,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options } else { resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) if err != nil { - err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) return false, err } } @@ -1439,7 +1530,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options } else { resultItem, err = ValidateStruct(v.Index(i).Interface()) if err != nil { - err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) return false, err } } @@ -1453,7 +1544,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options } return ValidateStruct(v.Interface()) case reflect.Ptr: - // If the value is a pointer then check its element + // If the value is a pointer then checks its element if v.IsNil() { return true, nil } @@ -1469,14 +1560,26 @@ func stripParams(validatorString string) string { return paramsRegexp.ReplaceAllString(validatorString, "") } -// isFieldSet returns false for nil pointers, interfaces, maps, and slices. For all other values, it returns true. 
-func isFieldSet(v reflect.Value) bool { +// isEmptyValue checks whether value empty or not +func isEmptyValue(v reflect.Value) bool { switch v.Kind() { - case reflect.Map, reflect.Slice, reflect.Interface, reflect.Ptr: - return !v.IsNil() + case reflect.String, reflect.Array: + return v.Len() == 0 + case reflect.Map, reflect.Slice: + return v.Len() == 0 || v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() } - return true + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) } // ErrorByField returns error for specified field of the struct @@ -1498,11 +1601,11 @@ func ErrorsByField(e error) map[string]string { } // prototype for ValidateStruct - switch e.(type) { + switch e := e.(type) { case Error: - m[e.(Error).Name] = e.(Error).Err.Error() + m[e.Name] = e.Err.Error() case Errors: - for _, item := range e.(Errors).Errors() { + for _, item := range e.Errors() { n := ErrorsByField(item) for k, v := range n { m[k] = v diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index c43b1bc0a173a..a880a3de8fe99 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -50,7 +50,7 @@ package credentials import ( "fmt" - "sync/atomic" + "sync" "time" "github.com/aws/aws-sdk-go/aws/awserr" @@ -207,9 +207,10 @@ func (e *Expiry) ExpiresAt() time.Time { // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { - creds atomic.Value - sf singleflight.Group + sf singleflight.Group + m sync.RWMutex + creds Value provider Provider } @@ -218,7 +219,6 @@ func NewCredentials(provider Provider) *Credentials { c := &Credentials{ provider: provider, } - c.creds.Store(Value{}) return c } @@ -235,8 +235,17 @@ func NewCredentials(provider Provider) *Credentials { // // Passed in Context is equivalent to aws.Context, and context.Context. func (c *Credentials) GetWithContext(ctx Context) (Value, error) { - if curCreds := c.creds.Load(); !c.isExpired(curCreds) { - return curCreds.(Value), nil + // Check if credentials are cached, and not expired. + select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true, of the credentials were not expired. ok will + // be false and have no value if the credentials are expired. 
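		// (asyncIsExpired takes only a read lock, sends the cached Value on a
		// buffered channel while it is still valid, and always closes the
		// channel, so this receive cannot block forever; ctx.Done() can still
		// win the race if the caller cancels first.)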
+ if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) } // Cannot pass context down to the actual retrieve, because the first @@ -254,18 +263,23 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) { } } -func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { - if curCreds := c.creds.Load(); !c.isExpired(curCreds) { - return curCreds.(Value), nil +func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { + c.m.Lock() + defer c.m.Unlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil } + var creds Value + var err error if p, ok := c.provider.(ProviderWithContext); ok { creds, err = p.RetrieveWithContext(ctx) } else { creds, err = c.provider.Retrieve() } if err == nil { - c.creds.Store(creds) + c.creds = creds } return creds, err @@ -290,7 +304,10 @@ func (c *Credentials) Get() (Value, error) { // This will override the Provider's expired state, and force Credentials // to call the Provider's Retrieve(). func (c *Credentials) Expire() { - c.creds.Store(Value{}) + c.m.Lock() + defer c.m.Unlock() + + c.creds = Value{} } // IsExpired returns if the credentials are no longer valid, and need @@ -299,11 +316,32 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - return c.isExpired(c.creds.Load()) + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpiredLocked(c.creds) } -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired(creds interface{}) bool { +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed the credentials are expired and credentials value are not empty. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} + +// isExpiredLocked helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpiredLocked(creds interface{}) bool { return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() } @@ -311,13 +349,17 @@ func (c *Credentials) isExpired(creds interface{}) bool { // the underlying Provider, if it supports that interface. Otherwise, it returns // an error. 
func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + expirer, ok := c.provider.(Expirer) if !ok { return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), + fmt.Sprintf("provider %s does not support ExpiresAt()", + c.creds.ProviderName), nil) } - if c.creds.Load().(Value) == (Value{}) { + if c.creds == (Value{}) { // set expiration time to the distant past return time.Time{}, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 8a2c56945f6b5..1b447aea580a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -593,6 +593,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -602,6 +603,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -721,6 +723,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -760,6 +763,7 @@ var awsPartition = partition{ "appsync": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -772,6 +776,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -782,6 +787,7 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -835,6 +841,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -844,6 +851,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -858,6 +866,7 @@ var awsPartition = partition{ "backup": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -867,6 +876,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -961,7 +971,7 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "aws-global": endpoint{ - Hostname: "service.chime.aws.amazon.com", + Hostname: "chime.us-east-1.amazonaws.com", Protocols: []string{"https"}, CredentialScope: credentialScope{ Region: "us-east-1", @@ -2034,12 +2044,42 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - 
"us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ec2": service{ @@ -2190,6 +2230,12 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "fips-us-west-1": endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "fips-us-west-2": endpoint{ Hostname: "fips.eks.us-west-2.amazonaws.com", CredentialScope: credentialScope{ @@ -2953,6 +2999,7 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3096,7 +3143,12 @@ var awsPartition = partition{ "health": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, }, }, "honeycode": service{ @@ -3448,6 +3500,7 @@ var awsPartition = partition{ "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -3511,11 +3564,35 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "lambda": service{ @@ -4401,6 +4478,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, @@ -4821,6 +4899,12 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "fips-aws-global": endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, 
"route53domains": service{ @@ -4876,6 +4960,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4885,6 +4970,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -5427,10 +5513,16 @@ var awsPartition = partition{ "eu-west-3": endpoint{}, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "servicediscovery-fips": endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "servicequotas": service{ @@ -5786,6 +5878,12 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "fips-us-east-1": endpoint{ Hostname: "ssm-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -5812,34 +5910,10 @@ var awsPartition = partition{ }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, - "ssm-facade-fips-us-east-1": endpoint{ - Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "ssm-facade-fips-us-east-2": endpoint{ - Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "ssm-facade-fips-us-west-1": endpoint{ - Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "ssm-facade-fips-us-west-2": endpoint{ - Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "states": service{ @@ -6181,7 +6255,9 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -6718,7 +6794,8 @@ var awscnPartition = partition{ "appsync": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "athena": service{ @@ -7005,6 +7082,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "fsx": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "gamelift": service{ Endpoints: endpoints{ @@ -7067,6 +7151,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "iotevents": service{ Endpoints: endpoints{ @@ -7119,6 +7209,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + 
"cn-north-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -7282,6 +7378,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -7295,6 +7398,13 @@ var awscnPartition = partition{ }, }, }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -7490,8 +7600,18 @@ var awsusgovPartition = partition{ "acm": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "acm-pca": service{ @@ -7590,8 +7710,12 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "appstream2": service{ @@ -7646,8 +7770,12 @@ var awsusgovPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "backup": service{ @@ -7799,6 +7927,12 @@ var awsusgovPartition = partition{ "cognito-identity": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -8004,6 +8138,18 @@ var awsusgovPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -8203,6 +8349,12 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "dataplane-us-gov-west-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -8217,6 +8369,7 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "guardduty.us-gov-west-1.amazonaws.com", @@ -8337,6 +8490,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": 
endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -8494,12 +8659,17 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "fips-us-gov-west-1": endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ Hostname: "pinpoint.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{}, }, }, "polly": service{ @@ -8599,6 +8769,12 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "route53resolver": service{ @@ -8845,18 +9021,6 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, - "ssm-facade-fips-us-gov-east-1": endpoint{ - Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "ssm-facade-fips-us-gov-west-1": endpoint{ - Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -9446,6 +9610,14 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ @@ -9481,6 +9653,17 @@ var awsisobPartition = partition{ }, }, Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, "application-autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -9560,6 +9743,12 @@ var awsisobPartition = partition{ }, }, }, + "ecs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -9580,6 +9769,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 3d79d3a386e4d..a849fbc75b492 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.5" +const SDKVersion = "1.35.31" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index cf9fad81e704d..55fa73ebcf2a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -63,9 +63,10 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenNone: MarkCompleteState, }, ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, }, ASTKindStatement: map[TokenType]int{ TokenLit: SectionState, diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go 
b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index ebfb1dcb43843..f988e1fbdb3a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -1776,6 +1776,102 @@ func (d *discovererDescribeEndpoints) Handler(r *request.Request) { } } +const opDescribeExport = "DescribeExport" + +// DescribeExportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExport for more information on using the DescribeExport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportRequest method. +// req, resp := client.DescribeExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) { + op := &request.Operation{ + Name: opDescribeExport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportInput{} + } + + output = &DescribeExportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExport API operation for Amazon DynamoDB. +// +// Describes an existing table export. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeExport for usage and error information. +// +// Returned Error Types: +// * ExportNotFoundException +// The specified export was not found. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + return out, req.Send() +} + +// DescribeExportWithContext is the same as DescribeExport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExport for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeExportWithContext(ctx aws.Context, input *DescribeExportInput, opts ...request.Option) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeGlobalTable = "DescribeGlobalTable" // DescribeGlobalTableRequest generates a "aws/request.Request" representing the @@ -2469,6 +2565,114 @@ func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opExportTableToPointInTime = "ExportTableToPointInTime" + +// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the ExportTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportTableToPointInTimeRequest method. +// req, resp := client.ExportTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opExportTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportTableToPointInTimeInput{} + } + + output = &ExportTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportTableToPointInTime API operation for Amazon DynamoDB. +// +// Exports table data to an S3 bucket. The table must have point in time recovery +// enabled, and you can export data from any time within the point in time recovery +// window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExportTableToPointInTime for usage and error information. +// +// Returned Error Types: +// * TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// * PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. 
These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// +// * InvalidExportTimeException +// The specified ExportTime is outside of the point in time recovery window. +// +// * ExportConflictException +// There was a conflict when writing to the specified S3 bucket. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + return out, req.Send() +} + +// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of +// the ability to pass a context and additional request options. +// +// See ExportTableToPointInTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetItem = "GetItem" // GetItemRequest generates a "aws/request.Request" representing the @@ -2851,140 +3055,291 @@ func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, inpu return p.Err() } -const opListGlobalTables = "ListGlobalTables" +const opListExports = "ListExports" -// ListGlobalTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListGlobalTables operation. The "output" return +// ListExportsRequest generates a "aws/request.Request" representing the +// client's request for the ListExports operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGlobalTables for more information on using the ListGlobalTables +// See ListExports for more information on using the ListExports // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGlobalTablesRequest method. -// req, resp := client.ListGlobalTablesRequest(params) +// // Example sending a request using the ListExportsRequest method. 
+// req, resp := client.ListExportsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { op := &request.Operation{ - Name: opListGlobalTables, + Name: opListExports, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListGlobalTablesInput{} + input = &ListExportsInput{} } - output = &ListGlobalTablesOutput{} + output = &ListExportsOutput{} req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } return } -// ListGlobalTables API operation for Amazon DynamoDB. -// -// Lists all global tables that have a replica in the specified Region. +// ListExports API operation for Amazon DynamoDB. // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// Lists completed exports within the past 90 days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListGlobalTables for usage and error information. +// API operation ListExports for usage and error information. // // Returned Error Types: +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// // * InternalServerError // An error occurred on the server side. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) return out, req.Send() } -// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// ListExportsWithContext is the same as ListExports with the addition of // the ability to pass a context and additional request options. // -// See ListGlobalTables for details on how to use this API operation. +// See ListExports for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) +func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTables = "ListTables" - -// ListTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListTables operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTables for more information on using the ListTables -// API call, and error handling. +// ListExportsPages iterates over the pages of a ListExports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// See ListExports method for more information on how to use this operation. // +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the ListTablesRequest method. -// req, resp := client.ListTablesRequest(params) +// // Example iterating over at most 3 pages of a ListExports operation. +// pageNum := 0 +// err := client.ListExportsPages(params, +// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { + return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListExportsPagesWithContext same as ListExportsPages except +// it takes a Context and allows setting request options on the pages. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListExportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListExportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGlobalTables = "ListGlobalTables" + +// ListGlobalTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListGlobalTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGlobalTables for more information on using the ListGlobalTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { + op := &request.Operation{ + Name: opListGlobalTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListGlobalTablesInput{} + } + + output = &ListGlobalTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListGlobalTables API operation for Amazon DynamoDB. +// +// Lists all global tables that have a replica in the specified Region. +// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListGlobalTables for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + return out, req.Send() +} + +// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListGlobalTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTables for more information on using the ListTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { op := &request.Operation{ Name: opListTables, HTTPMethod: "POST", @@ -8064,7 +8419,7 @@ func (s *ContinuousBackupsUnavailableException) RequestID() string { return s.RespMetadata.RequestID } -// Represents a Contributor Insights summary entry.. +// Represents a Contributor Insights summary entry. type ContributorInsightsSummary struct { _ struct{} `type:"structure"` @@ -9800,6 +10155,70 @@ func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpoints return s } +type DescribeExportInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the export. 
+ // + // ExportArn is a required field + ExportArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeExportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"} + if s.ExportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExportArn")) + } + if s.ExportArn != nil && len(*s.ExportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExportArn sets the ExportArn field's value. +func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput { + s.ExportArn = &v + return s +} + +type DescribeExportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeExportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput { + s.ExportDescription = v + return s +} + type DescribeGlobalTableInput struct { _ struct{} `type:"structure"` @@ -10392,71 +10811,576 @@ type ExpectedAttributeValue struct { // not compare to {"NS":["6", "2", "1"]} ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` - // Causes DynamoDB to evaluate the value before attempting a conditional operation: - // - // * If Exists is true, DynamoDB will check to see if that attribute value - // already exists in the table. If it is found, then the operation succeeds. - // If it is not found, the operation fails with a ConditionCheckFailedException. + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // * If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionCheckFailedException. + // + // * If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the + // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // * Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // * Exists is false but you also provide a Value. (You cannot expect an + // attribute to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for the expected attribute. 
+ // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// SetAttributeValueList sets the AttributeValueList field's value. +func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { + s.AttributeValueList = v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { + s.ComparisonOperator = &v + return s +} + +// SetExists sets the Exists field's value. +func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { + s.Exists = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { + s.Value = v + return s +} + +// There was a conflict when writing to the specified S3 bucket. +type ExportConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportConflictException) GoString() string { + return s.String() +} + +func newErrorExportConflictException(v protocol.ResponseMetadata) error { + return &ExportConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExportConflictException) Code() string { + return "ExportConflictException" +} + +// Message returns the exception's message. +func (s *ExportConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportConflictException) OrigErr() error { + return nil +} + +func (s *ExportConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExportConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the properties of the exported table. +type ExportDescription struct { + _ struct{} `type:"structure"` + + // The billable size of the table export. + BilledSizeBytes *int64 `type:"long"` + + // The client token that was provided for the export task. A client token makes + // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple + // identical calls have the same effect as one single call. + ClientToken *string `type:"string"` + + // The time at which the export task completed. + EndTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table export. 
+ ExportArn *string `min:"37" type:"string"` + + // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON + // or ION. + ExportFormat *string `type:"string" enum:"ExportFormat"` + + // The name of the manifest file for the export task. + ExportManifest *string `type:"string"` + + // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + // FAILED. + ExportStatus *string `type:"string" enum:"ExportStatus"` + + // Point in time from which table data was exported. + ExportTime *time.Time `type:"timestamp"` + + // Status code for the result of the failed export. + FailureCode *string `type:"string"` + + // Export failure reason description. + FailureMessage *string `type:"string"` + + // The number of items exported. + ItemCount *int64 `type:"long"` + + // The name of the Amazon S3 bucket containing the export. + S3Bucket *string `type:"string"` + + // The ID of the AWS account that owns the bucket containing the export. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix used as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data is stored. Valid + // values for S3SseAlgorithm are: + // + // * AES256 - server-side encryption with Amazon S3 managed keys + // + // * KMS - server-side encryption with AWS KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` + + // The ID of the AWS KMS managed key used to encrypt the S3 bucket where export + // data is stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The time at which the export task began. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table that was exported. + TableArn *string `type:"string"` + + // Unique ID of the table that was exported. + TableId *string `type:"string"` +} + +// String returns the string representation +func (s ExportDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportDescription) GoString() string { + return s.String() +} + +// SetBilledSizeBytes sets the BilledSizeBytes field's value. +func (s *ExportDescription) SetBilledSizeBytes(v int64) *ExportDescription { + s.BilledSizeBytes = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportDescription) SetClientToken(v string) *ExportDescription { + s.ClientToken = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ExportDescription) SetEndTime(v time.Time) *ExportDescription { + s.EndTime = &v + return s +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportDescription) SetExportArn(v string) *ExportDescription { + s.ExportArn = &v + return s +} + +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportDescription) SetExportFormat(v string) *ExportDescription { + s.ExportFormat = &v + return s +} + +// SetExportManifest sets the ExportManifest field's value. +func (s *ExportDescription) SetExportManifest(v string) *ExportDescription { + s.ExportManifest = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportDescription) SetExportStatus(v string) *ExportDescription { + s.ExportStatus = &v + return s +} + +// SetExportTime sets the ExportTime field's value. 
+func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { + s.ExportTime = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { + s.FailureMessage = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { + s.ItemCount = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportDescription) SetS3Bucket(v string) *ExportDescription { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *ExportDescription) SetS3BucketOwner(v string) *ExportDescription { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportDescription) SetS3Prefix(v string) *ExportDescription { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportDescription) SetS3SseAlgorithm(v string) *ExportDescription { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportDescription) SetS3SseKmsKeyId(v string) *ExportDescription { + s.S3SseKmsKeyId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ExportDescription) SetStartTime(v time.Time) *ExportDescription { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportDescription) SetTableArn(v string) *ExportDescription { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *ExportDescription) SetTableId(v string) *ExportDescription { + s.TableId = &v + return s +} + +// The specified export was not found. +type ExportNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportNotFoundException) GoString() string { + return s.String() +} + +func newErrorExportNotFoundException(v protocol.ResponseMetadata) error { + return &ExportNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExportNotFoundException) Code() string { + return "ExportNotFoundException" +} + +// Message returns the exception's message. +func (s *ExportNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportNotFoundException) OrigErr() error { + return nil +} + +func (s *ExportNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExportNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Summary information about an export task. 
+type ExportSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the export. + ExportArn *string `min:"37" type:"string"` + + // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + // FAILED. + ExportStatus *string `type:"string" enum:"ExportStatus"` +} + +// String returns the string representation +func (s ExportSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportSummary) GoString() string { + return s.String() +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportSummary) SetExportArn(v string) *ExportSummary { + s.ExportArn = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportSummary) SetExportStatus(v string) *ExportSummary { + s.ExportStatus = &v + return s +} + +type ExportTableToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientToken makes the call to ExportTableToPointInTimeInput idempotent, + // meaning that multiple identical calls have the same effect as one single + // call. // - // * If Exists is false, DynamoDB assumes that the attribute value does not - // exist in the table. If in fact the value does not exist, then the assumption - // is valid and the operation succeeds. If the value is found, despite the - // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. + // A client token is valid for 8 hours after the first request that uses it + // is completed. After 8 hours, any request with the same client token is treated + // as a new request. Do not resubmit the same request with the same client token + // for more than 8 hours, or the result might not be idempotent. // - // The default setting for Exists is true. If you supply a Value all by itself, - // DynamoDB assumes the attribute exists: You don't have to set Exists to true, - // because it is implied. + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch + // exception. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON + // or ION. + ExportFormat *string `type:"string" enum:"ExportFormat"` + + // Time in the past from which to export table data. The table export will be + // a snapshot of the table's state at this point in time. + ExportTime *time.Time `type:"timestamp"` + + // The name of the Amazon S3 bucket to export the snapshot to. // - // DynamoDB returns a ValidationException if: + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The ID of the AWS account that owns the bucket the export will be stored + // in. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix to use as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data will be stored. Valid + // values for S3SseAlgorithm are: // - // * Exists is true but there is no Value to check. (You expect a value to - // exist, but don't specify what that value is.) + // * AES256 - server-side encryption with Amazon S3 managed keys // - // * Exists is false but you also provide a Value. (You cannot expect an - // attribute to have a value, while also expecting it not to exist.) 
- Exists *bool `type:"boolean"` + // * KMS - server-side encryption with AWS KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` - // Represents the data for the expected attribute. - // - // Each attribute value is described as a name-value pair. The name is the data - // type, and the value is the data itself. + // The ID of the AWS KMS managed key used to encrypt the S3 bucket where export + // data will be stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) associated with the table to export. // - // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. - Value *AttributeValue `type:"structure"` + // TableArn is a required field + TableArn *string `type:"string" required:"true"` } // String returns the string representation -func (s ExpectedAttributeValue) String() string { +func (s ExportTableToPointInTimeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExpectedAttributeValue) GoString() string { +func (s ExportTableToPointInTimeInput) GoString() string { return s.String() } -// SetAttributeValueList sets the AttributeValueList field's value. -func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { - s.AttributeValueList = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTableToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTableToPointInTimeInput"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + if s.S3SseKmsKeyId != nil && len(*s.S3SseKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3SseKmsKeyId", 1)) + } + if s.TableArn == nil { + invalidParams.Add(request.NewErrParamRequired("TableArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportTableToPointInTimeInput) SetClientToken(v string) *ExportTableToPointInTimeInput { + s.ClientToken = &v return s } -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { - s.ComparisonOperator = &v +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportTableToPointInTimeInput) SetExportFormat(v string) *ExportTableToPointInTimeInput { + s.ExportFormat = &v return s } -// SetExists sets the Exists field's value. -func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { - s.Exists = &v +// SetExportTime sets the ExportTime field's value. +func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableToPointInTimeInput { + s.ExportTime = &v return s } -// SetValue sets the Value field's value. -func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { - s.Value = v +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. 
+func (s *ExportTableToPointInTimeInput) SetS3BucketOwner(v string) *ExportTableToPointInTimeInput { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTableToPointInTimeInput) SetS3Prefix(v string) *ExportTableToPointInTimeInput { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportTableToPointInTimeInput) SetS3SseAlgorithm(v string) *ExportTableToPointInTimeInput { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportTableToPointInTimeInput) SetS3SseKmsKeyId(v string) *ExportTableToPointInTimeInput { + s.S3SseKmsKeyId = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportTableToPointInTimeInput) SetTableArn(v string) *ExportTableToPointInTimeInput { + s.TableArn = &v + return s +} + +type ExportTableToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains a description of the table export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation +func (s ExportTableToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTableToPointInTimeOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *ExportTableToPointInTimeOutput) SetExportDescription(v *ExportDescription) *ExportTableToPointInTimeOutput { + s.ExportDescription = v return s } @@ -11709,6 +12633,62 @@ func (s *InternalServerError) RequestID() string { return s.RespMetadata.RequestID } +// The specified ExportTime is outside of the point in time recovery window. +type InvalidExportTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidExportTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidExportTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { + return &InvalidExportTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExportTimeException) Code() string { + return "InvalidExportTimeException" +} + +// Message returns the exception's message. +func (s *InvalidExportTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidExportTimeException) OrigErr() error { + return nil +} + +func (s *InvalidExportTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidExportTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidExportTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime // and LatestRestorableDateTime. 
type InvalidRestoreTimeException struct { @@ -12402,6 +13382,95 @@ func (s *ListContributorInsightsOutput) SetNextToken(v string) *ListContributorI return s } +type ListExportsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return per page. + MaxResults *int64 `min:"1" type:"integer"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListExports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `type:"string"` + + // The Amazon Resource Name (ARN) associated with the exported table. + TableArn *string `type:"string"` +} + +// String returns the string representation +func (s ListExportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListExportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListExportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListExportsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListExportsInput) SetMaxResults(v int64) *ListExportsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListExportsInput) SetNextToken(v string) *ListExportsInput { + s.NextToken = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ListExportsInput) SetTableArn(v string) *ListExportsInput { + s.TableArn = &v + return s +} + +type ListExportsOutput struct { + _ struct{} `type:"structure"` + + // A list of ExportSummary objects. + ExportSummaries []*ExportSummary `type:"list"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListExports again, with NextToken set to this value. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListExportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListExportsOutput) GoString() string { + return s.String() +} + +// SetExportSummaries sets the ExportSummaries field's value. +func (s *ListExportsOutput) SetExportSummaries(v []*ExportSummary) *ListExportsOutput { + s.ExportSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListExportsOutput) SetNextToken(v string) *ListExportsOutput { + s.NextToken = &v + return s +} + type ListGlobalTablesInput struct { _ struct{} `type:"structure"` @@ -14615,6 +15684,12 @@ type ReplicaDescription struct { // 20 hours, DynamoDB will remove this replica from the replication group. // The replica will not be deleted and replication will stop from and to // this region. + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The AWS KMS key used to encrypt + // the table is inaccessible. If the AWS KMS key remains inaccessible for + // more than 20 hours, DynamoDB will remove this replica from the replication + // group. The replica will not be deleted and replication will stop from + // and to this region. ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` // Detailed information about the replica status. 
@@ -20126,6 +21201,42 @@ func ContributorInsightsStatus_Values() []string { } } +const ( + // ExportFormatDynamodbJson is a ExportFormat enum value + ExportFormatDynamodbJson = "DYNAMODB_JSON" + + // ExportFormatIon is a ExportFormat enum value + ExportFormatIon = "ION" +) + +// ExportFormat_Values returns all elements of the ExportFormat enum +func ExportFormat_Values() []string { + return []string{ + ExportFormatDynamodbJson, + ExportFormatIon, + } +} + +const ( + // ExportStatusInProgress is a ExportStatus enum value + ExportStatusInProgress = "IN_PROGRESS" + + // ExportStatusCompleted is a ExportStatus enum value + ExportStatusCompleted = "COMPLETED" + + // ExportStatusFailed is a ExportStatus enum value + ExportStatusFailed = "FAILED" +) + +// ExportStatus_Values returns all elements of the ExportStatus enum +func ExportStatus_Values() []string { + return []string{ + ExportStatusInProgress, + ExportStatusCompleted, + ExportStatusFailed, + } +} + const ( // GlobalTableStatusCreating is a GlobalTableStatus enum value GlobalTableStatusCreating = "CREATING" @@ -20244,6 +21355,9 @@ const ( // ReplicaStatusRegionDisabled is a ReplicaStatus enum value ReplicaStatusRegionDisabled = "REGION_DISABLED" + + // ReplicaStatusInaccessibleEncryptionCredentials is a ReplicaStatus enum value + ReplicaStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" ) // ReplicaStatus_Values returns all elements of the ReplicaStatus enum @@ -20255,6 +21369,7 @@ func ReplicaStatus_Values() []string { ReplicaStatusDeleting, ReplicaStatusActive, ReplicaStatusRegionDisabled, + ReplicaStatusInaccessibleEncryptionCredentials, } } @@ -20351,6 +21466,22 @@ func ReturnValuesOnConditionCheckFailure_Values() []string { } } +const ( + // S3SseAlgorithmAes256 is a S3SseAlgorithm enum value + S3SseAlgorithmAes256 = "AES256" + + // S3SseAlgorithmKms is a S3SseAlgorithm enum value + S3SseAlgorithmKms = "KMS" +) + +// S3SseAlgorithm_Values returns all elements of the S3SseAlgorithm enum +func S3SseAlgorithm_Values() []string { + return []string{ + S3SseAlgorithmAes256, + S3SseAlgorithmKms, + } +} + const ( // SSEStatusEnabling is a SSEStatus enum value SSEStatusEnabling = "ENABLING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go index 1ba6dbfe24597..d92f770ca1b7a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go @@ -111,6 +111,10 @@ type DynamoDBAPI interface { DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error) DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput) + DescribeExport(*dynamodb.DescribeExportInput) (*dynamodb.DescribeExportOutput, error) + DescribeExportWithContext(aws.Context, *dynamodb.DescribeExportInput, ...request.Option) (*dynamodb.DescribeExportOutput, error) + DescribeExportRequest(*dynamodb.DescribeExportInput) (*request.Request, *dynamodb.DescribeExportOutput) + DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error) DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error) DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, 
*dynamodb.DescribeGlobalTableOutput) @@ -135,6 +139,10 @@ type DynamoDBAPI interface { DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput) + ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, ...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput) + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error) GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) @@ -150,6 +158,13 @@ type DynamoDBAPI interface { ListContributorInsightsPages(*dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool) error ListContributorInsightsPagesWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool, ...request.Option) error + ListExports(*dynamodb.ListExportsInput) (*dynamodb.ListExportsOutput, error) + ListExportsWithContext(aws.Context, *dynamodb.ListExportsInput, ...request.Option) (*dynamodb.ListExportsOutput, error) + ListExportsRequest(*dynamodb.ListExportsInput) (*request.Request, *dynamodb.ListExportsOutput) + + ListExportsPages(*dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool) error + ListExportsPagesWithContext(aws.Context, *dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool, ...request.Option) error + ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error) ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error) ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index b7e2d40b21d98..8a9f3485dfdeb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -33,6 +33,18 @@ const ( // Backups have not yet been enabled for this table. ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" + // ErrCodeExportConflictException for service response error code + // "ExportConflictException". + // + // There was a conflict when writing to the specified S3 bucket. + ErrCodeExportConflictException = "ExportConflictException" + + // ErrCodeExportNotFoundException for service response error code + // "ExportNotFoundException". + // + // The specified export was not found. + ErrCodeExportNotFoundException = "ExportNotFoundException" + // ErrCodeGlobalTableAlreadyExistsException for service response error code // "GlobalTableAlreadyExistsException". // @@ -64,6 +76,12 @@ const ( // An error occurred on the server side. ErrCodeInternalServerError = "InternalServerError" + // ErrCodeInvalidExportTimeException for service response error code + // "InvalidExportTimeException". 
+ // + // The specified ExportTime is outside of the point in time recovery window. + ErrCodeInvalidExportTimeException = "InvalidExportTimeException" + // ErrCodeInvalidRestoreTimeException for service response error code // "InvalidRestoreTimeException". // @@ -274,11 +292,14 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "BackupNotFoundException": newErrorBackupNotFoundException, "ConditionalCheckFailedException": newErrorConditionalCheckFailedException, "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException, + "ExportConflictException": newErrorExportConflictException, + "ExportNotFoundException": newErrorExportNotFoundException, "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException, "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, "IndexNotFoundException": newErrorIndexNotFoundException, "InternalServerError": newErrorInternalServerError, + "InvalidExportTimeException": newErrorInvalidExportTimeException, "InvalidRestoreTimeException": newErrorInvalidRestoreTimeException, "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException, "LimitExceededException": newErrorLimitExceededException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index c5b880d18cc44..3986270247359 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -1190,6 +1190,98 @@ func (c *EC2) AssociateDhcpOptionsWithContext(ctx aws.Context, input *AssociateD return out, req.Send() } +const opAssociateEnclaveCertificateIamRole = "AssociateEnclaveCertificateIamRole" + +// AssociateEnclaveCertificateIamRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssociateEnclaveCertificateIamRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateEnclaveCertificateIamRole for more information on using the AssociateEnclaveCertificateIamRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateEnclaveCertificateIamRoleRequest method. 
+// req, resp := client.AssociateEnclaveCertificateIamRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateEnclaveCertificateIamRole +func (c *EC2) AssociateEnclaveCertificateIamRoleRequest(input *AssociateEnclaveCertificateIamRoleInput) (req *request.Request, output *AssociateEnclaveCertificateIamRoleOutput) { + op := &request.Operation{ + Name: opAssociateEnclaveCertificateIamRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateEnclaveCertificateIamRoleInput{} + } + + output = &AssociateEnclaveCertificateIamRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud. +// +// Associates an AWS Identity and Access Management (IAM) role with an AWS Certificate +// Manager (ACM) certificate. This enables the certificate to be used by the +// ACM for Nitro Enclaves application inside an enclave. For more information, +// see AWS Certificate Manager for Nitro Enclaves (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html) +// in the AWS Nitro Enclaves User Guide. +// +// When the IAM role is associated with the ACM certificate, places the certificate, +// certificate chain, and encrypted private key in an Amazon S3 bucket that +// only the associated IAM role can access. The private key of the certificate +// is encrypted with an AWS-managed KMS customer master (CMK) that has an attached +// attestation-based CMK policy. +// +// To enable the IAM role to access the Amazon S3 object, you must grant it +// permission to call s3:GetObject on the Amazon S3 bucket returned by the command. +// To enable the IAM role to access the AWS KMS CMK, you must grant it permission +// to call kms:Decrypt on AWS KMS CMK returned by the command. For more information, +// see Grant the role permission to access the certificate and encryption key +// (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html#add-policy) +// in the AWS Nitro Enclaves User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateEnclaveCertificateIamRole for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateEnclaveCertificateIamRole +func (c *EC2) AssociateEnclaveCertificateIamRole(input *AssociateEnclaveCertificateIamRoleInput) (*AssociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.AssociateEnclaveCertificateIamRoleRequest(input) + return out, req.Send() +} + +// AssociateEnclaveCertificateIamRoleWithContext is the same as AssociateEnclaveCertificateIamRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateEnclaveCertificateIamRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) AssociateEnclaveCertificateIamRoleWithContext(ctx aws.Context, input *AssociateEnclaveCertificateIamRoleInput, opts ...request.Option) (*AssociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.AssociateEnclaveCertificateIamRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAssociateIamInstanceProfile = "AssociateIamInstanceProfile" // AssociateIamInstanceProfileRequest generates a "aws/request.Request" representing the @@ -7532,6 +7624,10 @@ func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *requ // the subnets in which to create an endpoint, and the security groups to associate // with the endpoint network interface. // +// A GatewayLoadBalancer endpoint is a network interface in your subnet that +// serves an endpoint for communicating with a Gateway Load Balancer that you've +// configured as a VPC endpoint service. +// // Use DescribeVpcEndpointServices to get a list of supported services. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7687,12 +7783,19 @@ func (c *EC2) CreateVpcEndpointServiceConfigurationRequest(input *CreateVpcEndpo // CreateVpcEndpointServiceConfiguration API operation for Amazon Elastic Compute Cloud. // // Creates a VPC endpoint service configuration to which service consumers (AWS -// accounts, IAM users, and IAM roles) can connect. Service consumers can create -// an interface VPC endpoint to connect to your service. +// accounts, IAM users, and IAM roles) can connect. // -// To create an endpoint service configuration, you must first create a Network -// Load Balancer for your service. For more information, see VPC Endpoint Services -// (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html) +// To create an endpoint service configuration, you must first create one of +// the following for your service: +// +// * A Network Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html). +// Service consumers connect to your service using an interface endpoint. +// +// * A Gateway Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/gateway/introduction.html). +// Service consumers connect to your service using a Gateway Load Balancer +// endpoint. +// +// For more information, see VPC Endpoint Services (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html) // in the Amazon Virtual Private Cloud User Guide. // // If you set the private DNS name, you must prove that you own the private @@ -8574,11 +8677,29 @@ func (c *EC2) DeleteFleetsRequest(input *DeleteFleetsInput) (req *request.Reques // // Deletes the specified EC2 Fleet. // -// After you delete an EC2 Fleet, it launches no new instances. You must specify -// whether an EC2 Fleet should also terminate its instances. If you terminate -// the instances, the EC2 Fleet enters the deleted_terminating state. Otherwise, -// the EC2 Fleet enters the deleted_running state, and the instances continue -// to run until they are interrupted or you terminate them manually. +// After you delete an EC2 Fleet, it launches no new instances. +// +// You must specify whether a deleted EC2 Fleet should also terminate its instances. +// If you choose to terminate the instances, the EC2 Fleet enters the deleted_terminating +// state. Otherwise, the EC2 Fleet enters the deleted_running state, and the +// instances continue to run until they are interrupted or you terminate them +// manually. 
+// +// For instant fleets, EC2 Fleet must terminate the instances when the fleet +// is deleted. A deleted instant fleet with running instances is not supported. +// +// Restrictions +// +// * You can delete up to 25 instant fleets in a single request. If you exceed +// this number, no instant fleets are deleted and an error is returned. There +// is no restriction on the number of fleets of type maintain or request +// that can be deleted in a single request. +// +// * Up to 1000 instances can be terminated in a single request to delete +// instant fleets. +// +// For more information, see Deleting an EC2 Fleet (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet) +// in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11545,8 +11666,10 @@ func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *re // // Deletes one or more specified VPC endpoints. Deleting a gateway endpoint // also deletes the endpoint routes in the route tables that were associated -// with the endpoint. Deleting an interface endpoint deletes the endpoint network -// interfaces. +// with the endpoint. Deleting an interface endpoint or a Gateway Load Balancer +// endpoint deletes the endpoint network interfaces. Gateway Load Balancer endpoints +// can only be deleted if the routes that are associated with the endpoint are +// deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -25354,6 +25477,13 @@ func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServi // // Describes available services to which you can create a VPC endpoint. // +// When the service provider and the consumer have different accounts multiple +// Availability Zones, and the consumer views the VPC endpoint service information, +// the response only includes the common Availability Zones. For example, when +// the service provider account uses us-east-1a and us-east-1c and the consumer +// uses us-east-1a and us-east-1a and us-east-1b, the response includes the +// VPC endpoint services in the common Availability Zone, us-east-1a. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -26960,6 +27090,86 @@ func (c *EC2) DisassociateClientVpnTargetNetworkWithContext(ctx aws.Context, inp return out, req.Send() } +const opDisassociateEnclaveCertificateIamRole = "DisassociateEnclaveCertificateIamRole" + +// DisassociateEnclaveCertificateIamRoleRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateEnclaveCertificateIamRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateEnclaveCertificateIamRole for more information on using the DisassociateEnclaveCertificateIamRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateEnclaveCertificateIamRoleRequest method. +// req, resp := client.DisassociateEnclaveCertificateIamRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateEnclaveCertificateIamRole +func (c *EC2) DisassociateEnclaveCertificateIamRoleRequest(input *DisassociateEnclaveCertificateIamRoleInput) (req *request.Request, output *DisassociateEnclaveCertificateIamRoleOutput) { + op := &request.Operation{ + Name: opDisassociateEnclaveCertificateIamRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateEnclaveCertificateIamRoleInput{} + } + + output = &DisassociateEnclaveCertificateIamRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud. +// +// Disassociates an IAM role from an AWS Certificate Manager (ACM) certificate. +// Disassociating an IAM role from an ACM certificate removes the Amazon S3 +// object that contains the certificate, certificate chain, and encrypted private +// key from the Amazon S3 bucket. It also revokes the IAM role's permission +// to use the AWS Key Management Service (KMS) customer master key (CMK) used +// to encrypt the private key. This effectively revokes the role's permission +// to use the certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateEnclaveCertificateIamRole for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateEnclaveCertificateIamRole +func (c *EC2) DisassociateEnclaveCertificateIamRole(input *DisassociateEnclaveCertificateIamRoleInput) (*DisassociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.DisassociateEnclaveCertificateIamRoleRequest(input) + return out, req.Send() +} + +// DisassociateEnclaveCertificateIamRoleWithContext is the same as DisassociateEnclaveCertificateIamRole with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateEnclaveCertificateIamRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateEnclaveCertificateIamRoleWithContext(ctx aws.Context, input *DisassociateEnclaveCertificateIamRoleInput, opts ...request.Option) (*DisassociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.DisassociateEnclaveCertificateIamRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDisassociateIamInstanceProfile = "DisassociateIamInstanceProfile" // DisassociateIamInstanceProfileRequest generates a "aws/request.Request" representing the @@ -28289,6 +28499,85 @@ func (c *EC2) ExportTransitGatewayRoutesWithContext(ctx aws.Context, input *Expo return out, req.Send() } +const opGetAssociatedEnclaveCertificateIamRoles = "GetAssociatedEnclaveCertificateIamRoles" + +// GetAssociatedEnclaveCertificateIamRolesRequest generates a "aws/request.Request" representing the +// client's request for the GetAssociatedEnclaveCertificateIamRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAssociatedEnclaveCertificateIamRoles for more information on using the GetAssociatedEnclaveCertificateIamRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAssociatedEnclaveCertificateIamRolesRequest method. +// req, resp := client.GetAssociatedEnclaveCertificateIamRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedEnclaveCertificateIamRoles +func (c *EC2) GetAssociatedEnclaveCertificateIamRolesRequest(input *GetAssociatedEnclaveCertificateIamRolesInput) (req *request.Request, output *GetAssociatedEnclaveCertificateIamRolesOutput) { + op := &request.Operation{ + Name: opGetAssociatedEnclaveCertificateIamRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAssociatedEnclaveCertificateIamRolesInput{} + } + + output = &GetAssociatedEnclaveCertificateIamRolesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAssociatedEnclaveCertificateIamRoles API operation for Amazon Elastic Compute Cloud. +// +// Returns the IAM roles that are associated with the specified AWS Certificate +// Manager (ACM) certificate. It also returns the name of the Amazon S3 bucket +// and the Amazon S3 object key where the certificate, certificate chain, and +// encrypted private key bundle are stored, and the ARN of the AWS Key Management +// Service (KMS) customer master key (CMK) that's used to encrypt the private +// key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetAssociatedEnclaveCertificateIamRoles for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedEnclaveCertificateIamRoles +func (c *EC2) GetAssociatedEnclaveCertificateIamRoles(input *GetAssociatedEnclaveCertificateIamRolesInput) (*GetAssociatedEnclaveCertificateIamRolesOutput, error) { + req, out := c.GetAssociatedEnclaveCertificateIamRolesRequest(input) + return out, req.Send() +} + +// GetAssociatedEnclaveCertificateIamRolesWithContext is the same as GetAssociatedEnclaveCertificateIamRoles with the addition of +// the ability to pass a context and additional request options. +// +// See GetAssociatedEnclaveCertificateIamRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedEnclaveCertificateIamRolesWithContext(ctx aws.Context, input *GetAssociatedEnclaveCertificateIamRolesInput, opts ...request.Option) (*GetAssociatedEnclaveCertificateIamRolesOutput, error) { + req, out := c.GetAssociatedEnclaveCertificateIamRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetAssociatedIpv6PoolCidrs = "GetAssociatedIpv6PoolCidrs" // GetAssociatedIpv6PoolCidrsRequest generates a "aws/request.Request" representing the @@ -33616,8 +33905,8 @@ func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *requ // ModifyVpcEndpoint API operation for Amazon Elastic Compute Cloud. // // Modifies attributes of a specified VPC endpoint. The attributes that you -// can modify depend on the type of VPC endpoint (interface or gateway). For -// more information, see VPC Endpoints (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) +// can modify depend on the type of VPC endpoint (interface, gateway, or Gateway +// Load Balancer). For more information, see VPC Endpoints (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -33769,9 +34058,9 @@ func (c *EC2) ModifyVpcEndpointServiceConfigurationRequest(input *ModifyVpcEndpo // ModifyVpcEndpointServiceConfiguration API operation for Amazon Elastic Compute Cloud. // // Modifies the attributes of your VPC endpoint service configuration. You can -// change the Network Load Balancers for your service, and you can specify whether -// acceptance is required for requests to connect to your endpoint service through -// an interface VPC endpoint. +// change the Network Load Balancers or Gateway Load Balancers for your service, +// and you can specify whether acceptance is required for requests to connect +// to your endpoint service through an interface VPC endpoint. // // If you set or modify the private DNS name, you must prove that you own the // private DNS domain name. For more information, see VPC Endpoint Service Private @@ -34958,7 +35247,7 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request. // succeeds if the instances are valid and belong to you. Requests to reboot // terminated instances are ignored. // -// If an instance does not cleanly shut down within four minutes, Amazon EC2 +// If an instance does not cleanly shut down within a few minutes, Amazon EC2 // performs a hard reboot. 
// // For more information about troubleshooting, see Getting console output and @@ -37294,16 +37583,22 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI output = &RevokeSecurityGroupEgressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud. // // [VPC only] Removes the specified egress rules from a security group for EC2-VPC. -// This action doesn't apply to security groups for use in EC2-Classic. To remove -// a rule, the values that you specify (for example, ports) must match the existing -// rule's values exactly. +// This action does not apply to security groups for use in EC2-Classic. To +// remove a rule, the values that you specify (for example, ports) must match +// the existing rule's values exactly. +// +// [Default VPC] If the values you specify do not match the existing rule's +// values, no error is returned, and the output describes the security group +// rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source // security group. For the TCP and UDP protocols, you must also specify the @@ -37381,7 +37676,6 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres output = &RevokeSecurityGroupIngressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -37391,9 +37685,12 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres // the values that you specify (for example, ports) must match the existing // rule's values exactly. // -// [EC2-Classic only] If the values you specify do not match the existing rule's -// values, no error is returned. Use DescribeSecurityGroups to verify that the -// rule has been removed. +// [EC2-Classic , default VPC] If the values you specify do not match the existing +// rule's values, no error is returned, and the output describes the security +// group rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the CIDR range or source security // group. For the TCP and UDP protocols, you must also specify the destination @@ -38222,7 +38519,7 @@ func (c *EC2) StartVpcEndpointServicePrivateDnsVerificationRequest(input *StartV // // Before the service provider runs this command, they must add a record to // the DNS server. For more information, see Adding a TXT Record to Your Domain's -// DNS Server (https://docs.aws.amazon.com/vpc/latest/userguide/ndpoint-services-dns-validation.html#add-dns-txt-record) +// DNS Server (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html#add-dns-txt-record) // in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -40771,6 +41068,109 @@ func (s AssociateDhcpOptionsOutput) GoString() string { return s.String() } +type AssociateEnclaveCertificateIamRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate with which to associate the IAM role. 
+ CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ARN of the IAM role to associate with the ACM certificate. You can associate + // up to 16 IAM roles with an ACM certificate. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AssociateEnclaveCertificateIamRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnclaveCertificateIamRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateEnclaveCertificateIamRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateEnclaveCertificateIamRoleInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetCertificateArn(v string) *AssociateEnclaveCertificateIamRoleInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetDryRun(v bool) *AssociateEnclaveCertificateIamRoleInput { + s.DryRun = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetRoleArn(v string) *AssociateEnclaveCertificateIamRoleInput { + s.RoleArn = &v + return s +} + +type AssociateEnclaveCertificateIamRoleOutput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket to which the certificate was uploaded. + CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"` + + // The Amazon S3 object key where the certificate, certificate chain, and encrypted + // private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn. + CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` + + // The ID of the AWS KMS CMK used to encrypt the private key of the certificate. + EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"` +} + +// String returns the string representation +func (s AssociateEnclaveCertificateIamRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnclaveCertificateIamRoleOutput) GoString() string { + return s.String() +} + +// SetCertificateS3BucketName sets the CertificateS3BucketName field's value. +func (s *AssociateEnclaveCertificateIamRoleOutput) SetCertificateS3BucketName(v string) *AssociateEnclaveCertificateIamRoleOutput { + s.CertificateS3BucketName = &v + return s +} + +// SetCertificateS3ObjectKey sets the CertificateS3ObjectKey field's value. 
+func (s *AssociateEnclaveCertificateIamRoleOutput) SetCertificateS3ObjectKey(v string) *AssociateEnclaveCertificateIamRoleOutput { + s.CertificateS3ObjectKey = &v + return s +} + +// SetEncryptionKmsKeyId sets the EncryptionKmsKeyId field's value. +func (s *AssociateEnclaveCertificateIamRoleOutput) SetEncryptionKmsKeyId(v string) *AssociateEnclaveCertificateIamRoleOutput { + s.EncryptionKmsKeyId = &v + return s +} + type AssociateIamInstanceProfileInput struct { _ struct{} `type:"structure"` @@ -41329,6 +41729,59 @@ func (s *AssociateVpcCidrBlockOutput) SetVpcId(v string) *AssociateVpcCidrBlockO return s } +// Information about the associated IAM roles. +type AssociatedRole struct { + _ struct{} `type:"structure"` + + // The ARN of the associated IAM role. + AssociatedRoleArn *string `locationName:"associatedRoleArn" min:"1" type:"string"` + + // The name of the Amazon S3 bucket in which the Amazon S3 object is stored. + CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"` + + // The key of the Amazon S3 object ey where the certificate, certificate chain, + // and encrypted private key bundle is stored. The object key is formated as + // follows: certificate_arn/role_arn. + CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` + + // The ID of the KMS customer master key (CMK) used to encrypt the private key. + EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"` +} + +// String returns the string representation +func (s AssociatedRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociatedRole) GoString() string { + return s.String() +} + +// SetAssociatedRoleArn sets the AssociatedRoleArn field's value. +func (s *AssociatedRole) SetAssociatedRoleArn(v string) *AssociatedRole { + s.AssociatedRoleArn = &v + return s +} + +// SetCertificateS3BucketName sets the CertificateS3BucketName field's value. +func (s *AssociatedRole) SetCertificateS3BucketName(v string) *AssociatedRole { + s.CertificateS3BucketName = &v + return s +} + +// SetCertificateS3ObjectKey sets the CertificateS3ObjectKey field's value. +func (s *AssociatedRole) SetCertificateS3ObjectKey(v string) *AssociatedRole { + s.CertificateS3ObjectKey = &v + return s +} + +// SetEncryptionKmsKeyId sets the EncryptionKmsKeyId field's value. +func (s *AssociatedRole) SetEncryptionKmsKeyId(v string) *AssociatedRole { + s.EncryptionKmsKeyId = &v + return s +} + // Describes a target network that is associated with a Client VPN endpoint. // A target network is a subnet in a VPC. type AssociatedTargetNetwork struct { @@ -41596,6 +42049,11 @@ type AttachNetworkInterfaceInput struct { // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + // The ID of the network interface. // // NetworkInterfaceId is a required field @@ -41649,6 +42107,12 @@ func (s *AttachNetworkInterfaceInput) SetInstanceId(v string) *AttachNetworkInte return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. 
+func (s *AttachNetworkInterfaceInput) SetNetworkCardIndex(v int64) *AttachNetworkInterfaceInput { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *AttachNetworkInterfaceInput) SetNetworkInterfaceId(v string) *AttachNetworkInterfaceInput { s.NetworkInterfaceId = &v @@ -41661,6 +42125,9 @@ type AttachNetworkInterfaceOutput struct { // The ID of the network interface attachment. AttachmentId *string `locationName:"attachmentId" type:"string"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` } // String returns the string representation @@ -41679,6 +42146,12 @@ func (s *AttachNetworkInterfaceOutput) SetAttachmentId(v string) *AttachNetworkI return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *AttachNetworkInterfaceOutput) SetNetworkCardIndex(v int64) *AttachNetworkInterfaceOutput { + s.NetworkCardIndex = &v + return s +} + type AttachVolumeInput struct { _ struct{} `type:"structure"` @@ -43937,7 +44410,7 @@ func (s *CapacityReservationGroup) SetOwnerId(v string) *CapacityReservationGrou // For more information about Capacity Reservations, see On-Demand Capacity // Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html) // in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity -// Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) +// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) // in the Amazon Elastic Compute Cloud User Guide. type CapacityReservationOptions struct { _ struct{} `type:"structure"` @@ -43982,7 +44455,7 @@ func (s *CapacityReservationOptions) SetUsageStrategy(v string) *CapacityReserva // For more information about Capacity Reservations, see On-Demand Capacity // Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html) // in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity -// Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) +// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) // in the Amazon Elastic Compute Cloud User Guide. type CapacityReservationOptionsRequest struct { _ struct{} `type:"structure"` @@ -44551,6 +45024,84 @@ func (s *ClientCertificateRevocationListStatus) SetMessage(v string) *ClientCert return s } +// The options for managing connection authorization for new client connections. +type ClientConnectOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether client connect options are enabled. The default is false + // (not enabled). + Enabled *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function used for connection + // authorization. + LambdaFunctionArn *string `type:"string"` +} + +// String returns the string representation +func (s ClientConnectOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientConnectOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. 
+func (s *ClientConnectOptions) SetEnabled(v bool) *ClientConnectOptions { + s.Enabled = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *ClientConnectOptions) SetLambdaFunctionArn(v string) *ClientConnectOptions { + s.LambdaFunctionArn = &v + return s +} + +// The options for managing connection authorization for new client connections. +type ClientConnectResponseOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether client connect options are enabled. + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function used for connection + // authorization. + LambdaFunctionArn *string `locationName:"lambdaFunctionArn" type:"string"` + + // The status of any updates to the client connect options. + Status *ClientVpnEndpointAttributeStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s ClientConnectResponseOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientConnectResponseOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ClientConnectResponseOptions) SetEnabled(v bool) *ClientConnectResponseOptions { + s.Enabled = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *ClientConnectResponseOptions) SetLambdaFunctionArn(v string) *ClientConnectResponseOptions { + s.LambdaFunctionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ClientConnectResponseOptions) SetStatus(v *ClientVpnEndpointAttributeStatus) *ClientConnectResponseOptions { + s.Status = v + return s +} + // Describes the client-specific data. type ClientData struct { _ struct{} `type:"structure"` @@ -44603,7 +45154,7 @@ func (s *ClientData) SetUploadStart(v time.Time) *ClientData { } // Describes the authentication methods used by a Client VPN endpoint. For more -// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) +// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/client-authentication.html) // in the AWS Client VPN Administrator Guide. type ClientVpnAuthentication struct { _ struct{} `type:"structure"` @@ -44779,6 +45330,10 @@ type ClientVpnConnection struct { // The number of packets sent by the client. IngressPackets *string `locationName:"ingressPackets" type:"string"` + // The statuses returned by the client connect handler for posture compliance, + // if applicable. + PostureComplianceStatuses []*string `locationName:"postureComplianceStatusSet" locationNameList:"item" type:"list"` + // The current state of the client connection. Status *ClientVpnConnectionStatus `locationName:"status" type:"structure"` @@ -44860,6 +45415,12 @@ func (s *ClientVpnConnection) SetIngressPackets(v string) *ClientVpnConnection { return s } +// SetPostureComplianceStatuses sets the PostureComplianceStatuses field's value. +func (s *ClientVpnConnection) SetPostureComplianceStatuses(v []*string) *ClientVpnConnection { + s.PostureComplianceStatuses = v + return s +} + // SetStatus sets the Status field's value. func (s *ClientVpnConnection) SetStatus(v *ClientVpnConnectionStatus) *ClientVpnConnection { s.Status = v @@ -44928,6 +45489,9 @@ type ClientVpnEndpoint struct { // are assigned. 
ClientCidrBlock *string `locationName:"clientCidrBlock" type:"string"` + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectResponseOptions `locationName:"clientConnectOptions" type:"structure"` + // The ID of the Client VPN endpoint. ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` @@ -44953,6 +45517,9 @@ type ClientVpnEndpoint struct { // The IDs of the security groups for the target network. SecurityGroupIds []*string `locationName:"securityGroupIdSet" locationNameList:"item" type:"list"` + // The URL of the self-service portal. + SelfServicePortalUrl *string `locationName:"selfServicePortalUrl" type:"string"` + // The ARN of the server certificate. ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` @@ -45010,6 +45577,12 @@ func (s *ClientVpnEndpoint) SetClientCidrBlock(v string) *ClientVpnEndpoint { return s } +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *ClientVpnEndpoint) SetClientConnectOptions(v *ClientConnectResponseOptions) *ClientVpnEndpoint { + s.ClientConnectOptions = v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *ClientVpnEndpoint) SetClientVpnEndpointId(v string) *ClientVpnEndpoint { s.ClientVpnEndpointId = &v @@ -45058,6 +45631,12 @@ func (s *ClientVpnEndpoint) SetSecurityGroupIds(v []*string) *ClientVpnEndpoint return s } +// SetSelfServicePortalUrl sets the SelfServicePortalUrl field's value. +func (s *ClientVpnEndpoint) SetSelfServicePortalUrl(v string) *ClientVpnEndpoint { + s.SelfServicePortalUrl = &v + return s +} + // SetServerCertificateArn sets the ServerCertificateArn field's value. func (s *ClientVpnEndpoint) SetServerCertificateArn(v string) *ClientVpnEndpoint { s.ServerCertificateArn = &v @@ -45106,6 +45685,39 @@ func (s *ClientVpnEndpoint) SetVpnProtocol(v string) *ClientVpnEndpoint { return s } +// Describes the status of the Client VPN endpoint attribute. +type ClientVpnEndpointAttributeStatus struct { + _ struct{} `type:"structure"` + + // The status code. + Code *string `locationName:"code" type:"string" enum:"ClientVpnEndpointAttributeStatusCode"` + + // The status message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnEndpointAttributeStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnEndpointAttributeStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnEndpointAttributeStatus) SetCode(v string) *ClientVpnEndpointAttributeStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnEndpointAttributeStatus) SetMessage(v string) *ClientVpnEndpointAttributeStatus { + s.Message = &v + return s +} + // Describes the state of a Client VPN endpoint. type ClientVpnEndpointStatus struct { _ struct{} `type:"structure"` @@ -45867,29 +46479,25 @@ type CopyImageInput struct { // in the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` - // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer - // master key (CMK) to use when creating the encrypted volume. This parameter - // is only required if you want to use a non-default CMK; if this parameter - // is not specified, the default CMK for EBS is used. 
If a KmsKeyId is specified, - // the Encrypted flag must also be set. + // The identifier of the symmetric AWS Key Management Service (AWS KMS) customer + // master key (CMK) to use when creating encrypted volumes. If this parameter + // is not specified, your AWS managed CMK for EBS is used. If you specify a + // CMK, you must also set the encrypted state to true. // - // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, - // or alias ARN. When using an alias name, prefix it with "alias/". For example: + // You can specify a CMK using any of the following: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // * Key alias. For example, alias/ExampleAlias. // - // * Alias name: alias/ExampleAlias + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // AWS parses KmsKeyId asynchronously, meaning that the action you call may - // appear to complete even though you provided an invalid identifier. This action - // will eventually report failure. + // AWS authenticates the CMK asynchronously. Therefore, if you specify an identifier + // that is not valid, the action can appear to complete, but eventually fails. // - // The specified CMK must exist in the Region that the snapshot is being copied - // to. + // The specified CMK must exist in the destination Region. // // Amazon EBS does not support asymmetric CMKs. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` @@ -46048,11 +46656,11 @@ type CopySnapshotInput struct { // // You can specify the CMK using any of the following: // - // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // // * Key alias. For example, alias/ExampleAlias. // - // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // @@ -46617,6 +47225,9 @@ type CreateClientVpnEndpointInput struct { // ClientCidrBlock is a required field ClientCidrBlock *string `type:"string" required:"true"` + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectOptions `type:"structure"` + // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `type:"string" idempotencyToken:"true"` @@ -46655,6 +47266,11 @@ type CreateClientVpnEndpointInput struct { // must also specify the ID of the VPC that contains the security groups. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + // Specify whether to enable the self-service portal for the Client VPN endpoint. + // + // Default Value: enabled + SelfServicePortal *string `type:"string" enum:"SelfServicePortal"` + // The ARN of the server certificate. 
For more information, see the AWS Certificate // Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). // @@ -46735,6 +47351,12 @@ func (s *CreateClientVpnEndpointInput) SetClientCidrBlock(v string) *CreateClien return s } +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *CreateClientVpnEndpointInput) SetClientConnectOptions(v *ClientConnectOptions) *CreateClientVpnEndpointInput { + s.ClientConnectOptions = v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *CreateClientVpnEndpointInput) SetClientToken(v string) *CreateClientVpnEndpointInput { s.ClientToken = &v @@ -46771,6 +47393,12 @@ func (s *CreateClientVpnEndpointInput) SetSecurityGroupIds(v []*string) *CreateC return s } +// SetSelfServicePortal sets the SelfServicePortal field's value. +func (s *CreateClientVpnEndpointInput) SetSelfServicePortal(v string) *CreateClientVpnEndpointInput { + s.SelfServicePortal = &v + return s +} + // SetServerCertificateArn sets the ServerCertificateArn field's value. func (s *CreateClientVpnEndpointInput) SetServerCertificateArn(v string) *CreateClientVpnEndpointInput { s.ServerCertificateArn = &v @@ -47507,7 +48135,7 @@ type CreateFleetInput struct { // The key-value pair for tagging the EC2 Fleet request on creation. The value // for ResourceType must be fleet, otherwise the fleet request fails. To tag // instances at launch, specify the tags in the launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template). - // For information about tagging after launch, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). + // For information about tagging after launch, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The number of units to request. @@ -47519,13 +48147,22 @@ type CreateFleetInput struct { // expires. TerminateInstancesWithExpiration *bool `type:"boolean"` - // The type of the request. By default, the EC2 Fleet places an asynchronous - // request for your desired capacity, and maintains it by replenishing interrupted - // Spot Instances (maintain). A value of instant places a synchronous one-time - // request, and returns errors for any instances that could not be launched. - // A value of request places an asynchronous one-time request without maintaining - // capacity or submitting requests in alternative capacity pools if capacity - // is unavailable. For more information, see EC2 Fleet Request Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-request-type) + // The type of request. The default value is maintain. + // + // * maintain - The EC2 Fleet plaees an asynchronous request for your desired + // capacity, and continues to maintain your desired Spot capacity by replenishing + // interrupted Spot Instances. + // + // * request - The EC2 Fleet places an asynchronous one-time request for + // your desired capacity, but does submit Spot requests in alternative capacity + // pools if Spot capacity is unavailable, and does not maintain Spot capacity + // if Spot Instances are interrupted. + // + // * instant - The EC2 Fleet places a synchronous one-time request for your + // desired capacity, and returns errors for any instances that could not + // be launched. 
+ // + // For more information, see EC2 Fleet request types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-request-type) // in the Amazon Elastic Compute Cloud User Guide. Type *string `type:"string" enum:"FleetType"` @@ -48267,7 +48904,9 @@ type CreateInstanceExportTaskInput struct { Description *string `locationName:"description" type:"string"` // The format and location for an instance export task. - ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"` + // + // ExportToS3Task is a required field + ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure" required:"true"` // The ID of the instance. // @@ -48278,7 +48917,9 @@ type CreateInstanceExportTaskInput struct { TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The target virtualization environment. - TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` + // + // TargetEnvironment is a required field + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" required:"true" enum:"ExportEnvironment"` } // String returns the string representation @@ -48294,9 +48935,15 @@ func (s CreateInstanceExportTaskInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateInstanceExportTaskInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateInstanceExportTaskInput"} + if s.ExportToS3Task == nil { + invalidParams.Add(request.NewErrParamRequired("ExportToS3Task")) + } if s.InstanceId == nil { invalidParams.Add(request.NewErrParamRequired("InstanceId")) } + if s.TargetEnvironment == nil { + invalidParams.Add(request.NewErrParamRequired("TargetEnvironment")) + } if invalidParams.Len() > 0 { return invalidParams @@ -50071,6 +50718,9 @@ type CreateRouteInput struct { // The ID of a transit gateway. TransitGatewayId *string `type:"string"` + // The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only. + VpcEndpointId *string `type:"string"` + // The ID of a VPC peering connection. VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` } @@ -50176,6 +50826,12 @@ func (s *CreateRouteInput) SetTransitGatewayId(v string) *CreateRouteInput { return s } +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *CreateRouteInput) SetVpcEndpointId(v string) *CreateRouteInput { + s.VpcEndpointId = &v + return s +} + // SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. func (s *CreateRouteInput) SetVpcPeeringConnectionId(v string) *CreateRouteInput { s.VpcPeeringConnectionId = &v @@ -50585,7 +51241,10 @@ func (s *CreateSnapshotsOutput) SetSnapshots(v []*SnapshotInfo) *CreateSnapshots type CreateSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket in which to store the Spot Instance data feed. + // The name of the Amazon S3 bucket in which to store the Spot Instance data + // feed. For more information about bucket names, see Rules for bucket naming + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules) + // in the Amazon S3 Developer Guide. // // Bucket is a required field Bucket *string `locationName:"bucket" type:"string" required:"true"` @@ -50596,7 +51255,7 @@ type CreateSpotDatafeedSubscriptionInput struct { // it is UnauthorizedOperation. 
DryRun *bool `locationName:"dryRun" type:"boolean"` - // A prefix for the data feed file names. + // The prefix for the data feed file names. Prefix *string `locationName:"prefix" type:"string"` } @@ -52178,10 +52837,15 @@ func (s *CreateTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment type CreateTransitGatewayVpcAttachmentRequestOptions struct { _ struct{} `type:"structure"` + // Enable or disable support for appliance mode. If enabled, a traffic flow + // between a source and destination uses the same Availability Zone for the + // VPC attachment for the lifetime of that flow. The default is disable. + ApplianceModeSupport *string `type:"string" enum:"ApplianceModeSupportValue"` + // Enable or disable DNS support. The default is enable. DnsSupport *string `type:"string" enum:"DnsSupportValue"` - // Enable or disable IPv6 support. The default is enable. + // Enable or disable IPv6 support. Ipv6Support *string `type:"string" enum:"Ipv6SupportValue"` } @@ -52195,6 +52859,12 @@ func (s CreateTransitGatewayVpcAttachmentRequestOptions) GoString() string { return s.String() } +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetApplianceModeSupport(v string) *CreateTransitGatewayVpcAttachmentRequestOptions { + s.ApplianceModeSupport = &v + return s +} + // SetDnsSupport sets the DnsSupport field's value. func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetDnsSupport(v string) *CreateTransitGatewayVpcAttachmentRequestOptions { s.DnsSupport = &v @@ -52249,11 +52919,11 @@ type CreateVolumeInput struct { // // You can specify the CMK using any of the following: // - // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // // * Key alias. For example, alias/ExampleAlias. // - // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // @@ -52594,9 +53264,10 @@ type CreateVpcEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // A policy to attach to the endpoint that controls access to the service. The - // policy must be in valid JSON format. If this parameter is not specified, - // we attach a default policy that allows full access to the service. + // (Interface and gateway endpoints) A policy to attach to the endpoint that + // controls access to the service. The policy must be in valid JSON format. + // If this parameter is not specified, we attach a default policy that allows + // full access to the service. PolicyDocument *string `type:"string"` // (Interface endpoint) Indicates whether to associate a private hosted zone @@ -52627,8 +53298,9 @@ type CreateVpcEndpointInput struct { // ServiceName is a required field ServiceName *string `type:"string" required:"true"` - // (Interface endpoint) The ID of one or more subnets in which to create an - // endpoint network interface. + // (Interface and Gateway Load Balancer endpoints) The ID of one or more subnets + // in which to create an endpoint network interface. For a Gateway Load Balancer + // endpoint, you can specify one subnet only. SubnetIds []*string `locationName:"SubnetId" locationNameList:"item" type:"list"` // The tags to associate with the endpoint. 
@@ -52788,13 +53460,15 @@ type CreateVpcEndpointServiceConfigurationInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The Amazon Resource Names (ARNs) of one or more Gateway Load Balancers. + GatewayLoadBalancerArns []*string `locationName:"GatewayLoadBalancerArn" locationNameList:"item" type:"list"` + // The Amazon Resource Names (ARNs) of one or more Network Load Balancers for // your service. - // - // NetworkLoadBalancerArns is a required field - NetworkLoadBalancerArns []*string `locationName:"NetworkLoadBalancerArn" locationNameList:"item" type:"list" required:"true"` + NetworkLoadBalancerArns []*string `locationName:"NetworkLoadBalancerArn" locationNameList:"item" type:"list"` - // The private DNS name to assign to the VPC endpoint service. + // (Interface endpoint configuration) The private DNS name to assign to the + // VPC endpoint service. PrivateDnsName *string `type:"string"` // The tags to associate with the service. @@ -52811,19 +53485,6 @@ func (s CreateVpcEndpointServiceConfigurationInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateVpcEndpointServiceConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateVpcEndpointServiceConfigurationInput"} - if s.NetworkLoadBalancerArns == nil { - invalidParams.Add(request.NewErrParamRequired("NetworkLoadBalancerArns")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetAcceptanceRequired sets the AcceptanceRequired field's value. func (s *CreateVpcEndpointServiceConfigurationInput) SetAcceptanceRequired(v bool) *CreateVpcEndpointServiceConfigurationInput { s.AcceptanceRequired = &v @@ -52842,6 +53503,12 @@ func (s *CreateVpcEndpointServiceConfigurationInput) SetDryRun(v bool) *CreateVp return s } +// SetGatewayLoadBalancerArns sets the GatewayLoadBalancerArns field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetGatewayLoadBalancerArns(v []*string) *CreateVpcEndpointServiceConfigurationInput { + s.GatewayLoadBalancerArns = v + return s +} + // SetNetworkLoadBalancerArns sets the NetworkLoadBalancerArns field's value. func (s *CreateVpcEndpointServiceConfigurationInput) SetNetworkLoadBalancerArns(v []*string) *CreateVpcEndpointServiceConfigurationInput { s.NetworkLoadBalancerArns = v @@ -54159,8 +54826,14 @@ type DeleteFleetsInput struct { // FleetIds is a required field FleetIds []*string `locationName:"FleetId" type:"list" required:"true"` - // Indicates whether to terminate instances for an EC2 Fleet if it is deleted - // successfully. + // Indicates whether to terminate the instances when the EC2 Fleet is deleted. + // The default is to terminate the instances. + // + // To let the instances continue to run after the EC2 Fleet is deleted, specify + // NoTerminateInstances. Supported only for fleets of type maintain and request. + // + // For instant fleets, you cannot specify NoTerminateInstances. A deleted instant + // fleet with running instances is not supported. // // TerminateInstances is a required field TerminateInstances *bool `type:"boolean" required:"true"` @@ -61010,7 +61683,7 @@ type DescribeFlowLogsInput struct { // // * log-destination-type - The type of destination to which the flow log // publishes data. Possible destination types include cloud-watch-logs and - // S3. + // s3. // // * flow-log-id - The ID of the flow log. 
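With NetworkLoadBalancerArns no longer required, an endpoint service can now be backed by Gateway Load Balancers instead. A rough sketch under that assumption; the Gateway Load Balancer ARN below is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.CreateVpcEndpointServiceConfiguration(&ec2.CreateVpcEndpointServiceConfigurationInput{
		// Either GatewayLoadBalancerArns or NetworkLoadBalancerArns can be
		// supplied now that the NLB field is optional.
		GatewayLoadBalancerArns: []*string{
			aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/gwy/example/0123456789abcdef"),
		},
		AcceptanceRequired: aws.Bool(false),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}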
// @@ -62529,6 +63202,10 @@ type DescribeInstanceAttributeOutput struct { // Indicates whether enhanced networking with ENA is enabled. EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + // To enable the instance for AWS Nitro Enclaves, set this parameter to true; + // otherwise, set it to false. + EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` + // The security groups associated with the instance. Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` @@ -62601,6 +63278,12 @@ func (s *DescribeInstanceAttributeOutput) SetEnaSupport(v *AttributeBooleanValue return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *DescribeInstanceAttributeOutput) SetEnclaveOptions(v *EnclaveOptions) *DescribeInstanceAttributeOutput { + s.EnclaveOptions = v + return s +} + // SetGroups sets the Groups field's value. func (s *DescribeInstanceAttributeOutput) SetGroups(v []*GroupIdentifier) *DescribeInstanceAttributeOutput { s.Groups = v @@ -63002,7 +63685,7 @@ type DescribeInstanceTypeOfferingsInput struct { // type is region (default), the location is the Region code (for example, // us-east-2.) // - // * instance-type - The instance type. + // * instance-type - The instance type. For example, c5.2xlarge. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The location type. @@ -63114,52 +63797,52 @@ type DescribeInstanceTypesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * auto-recovery-supported - Indicates whether auto recovery is supported. - // (true | false) + // * auto-recovery-supported - Indicates whether auto recovery is supported + // (true | false). // - // * bare-metal - Indicates whether it is a bare metal instance type. (true - // | false) + // * bare-metal - Indicates whether it is a bare metal instance type (true + // | false). // // * burstable-performance-supported - Indicates whether it is a burstable - // performance instance type. (true | false) + // performance instance type (true | false). // // * current-generation - Indicates whether this instance type is the latest - // generation instance type of an instance family. (true | false) + // generation instance type of an instance family (true | false). // // * ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline // bandwidth performance for an EBS-optimized instance type, in Mbps. // - // * ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline - // throughput performance for an EBS-optimized instance type, in MBps. - // // * ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output // storage operations per second for an EBS-optimized instance type. // + // * ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline + // throughput performance for an EBS-optimized instance type, in MB/s. + // // * ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum // bandwidth performance for an EBS-optimized instance type, in Mbps. // - // * ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum - // throughput performance for an EBS-optimized instance type, in MBps. - // // * ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output // storage operations per second for an EBS-optimized instance type. // + // * ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum + // throughput performance for an EBS-optimized instance type, in MB/s. 
+ // // * ebs-info.ebs-optimized-support - Indicates whether the instance type - // is EBS-optimized. (supported | unsupported | default) + // is EBS-optimized (supported | unsupported | default). // - // * ebs-info.encryption-support - Indicates whether EBS encryption is supported. - // (supported | unsupported) + // * ebs-info.encryption-support - Indicates whether EBS encryption is supported + // (supported | unsupported). // // * ebs-info.nvme-support - Indicates whether non-volatile memory express - // (NVMe) is supported or required. (required | supported | unsupported) + // (NVMe) is supported for EBS volumes (required | supported | unsupported). // // * free-tier-eligible - Indicates whether the instance type is eligible - // to use in the free tier. (true | false) + // to use in the free tier (true | false). // - // * hibernation-supported - Indicates whether On-Demand hibernation is supported. - // (true | false) + // * hibernation-supported - Indicates whether On-Demand hibernation is supported + // (true | false). // - // * hypervisor - The hypervisor used. (nitro | xen) + // * hypervisor - The hypervisor (nitro | xen). // // * instance-storage-info.disk.count - The number of local disks. // @@ -63167,21 +63850,27 @@ type DescribeInstanceTypesInput struct { // storage disk, in GB. // // * instance-storage-info.disk.type - The storage technology for the local - // instance storage disks. (hdd | ssd) + // instance storage disks (hdd | ssd). + // + // * instance-storage-info.nvme-support - Indicates whether non-volatile + // memory express (NVMe) is supported for instance store (required | supported) + // | unsupported). // // * instance-storage-info.total-size-in-gb - The total amount of storage // available from all local instance storage, in GB. // // * instance-storage-supported - Indicates whether the instance type has - // local instance storage. (true | false) + // local instance storage (true | false). // - // * memory-info.size-in-mib - The memory size. + // * instance-type - The instance type (for example c5.2xlarge or c5*). // - // * network-info.ena-support - Indicates whether Elastic Network Adapter - // (ENA) is supported or required. (required | supported | unsupported) + // * memory-info.size-in-mib - The memory size. // // * network-info.efa-supported - Indicates whether the instance type supports - // Elastic Fabric Adapter (EFA). (true | false) + // Elastic Fabric Adapter (EFA) (true | false). + // + // * network-info.ena-support - Indicates whether Elastic Network Adapter + // (ENA) is supported or required (required | supported | unsupported). // // * network-info.ipv4-addresses-per-interface - The maximum number of private // IPv4 addresses per network interface. @@ -63190,16 +63879,26 @@ type DescribeInstanceTypesInput struct { // IPv6 addresses per network interface. // // * network-info.ipv6-supported - Indicates whether the instance type supports - // IPv6. (true | false) + // IPv6 (true | false). // // * network-info.maximum-network-interfaces - The maximum number of network // interfaces per instance. // - // * network-info.network-performance - Describes the network performance. + // * network-info.network-performance - The network performance (for example, + // "25 Gigabit"). + // + // * processor-info.supported-architecture - The CPU architecture (arm64 + // | i386 | x86_64). // // * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in // GHz. // + // * supported-root-device-type - The root device type (ebs | instance-store). 
+ // + // * supported-usage-class - The usage class (on-demand | spot). + // + // * supported-virtualization-type - The virtualization type (hvm | paravirtual). + // // * vcpu-info.default-cores - The default number of cores for the instance // type. // @@ -63208,6 +63907,12 @@ type DescribeInstanceTypesInput struct { // // * vcpu-info.default-vcpus - The default number of vCPUs for the instance // type. + // + // * vcpu-info.valid-cores - The number of cores that can be configured for + // the instance type. + // + // * vcpu-info.valid-threads-per-core - The number of threads per core that + // can be configured for the instance type. For example, "1" or "1,2". Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The instance types. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) @@ -68694,8 +69399,9 @@ type DescribeSpotPriceHistoryInput struct { // * instance-type - The type of instance (for example, m3.medium). // // * product-description - The product description for the Spot price (Linux/UNIX - // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon - // VPC) | Windows (Amazon VPC)). + // | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX (Amazon + // VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). // // * spot-price - The Spot price. The value must match exactly (or use wildcards; // greater than or less than comparison is not supported). @@ -69827,6 +70533,16 @@ type DescribeTransitGatewayPeeringAttachmentsInput struct { // | deleted | deleting | failed | failing | initiatingRequest | modifying // | pendingAcceptance | pending | rollingBack | rejected | rejecting). // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. + // // * transit-gateway-id - The ID of the transit gateway. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -71698,6 +72414,9 @@ type DescribeVpcEndpointsInput struct { // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | // pending | available | deleting | deleted | rejected | failed). // + // * vpc-endpoint-type - The type of VPC endpoint (Interface | Gateway | + // GatewayLoadBalancer). + // // * tag: - The key/value combination of a tag assigned to the resource. // Use the tag key in the filter name and the tag value as the filter value. // For example, to find all resources that have a tag with the key Owner @@ -73667,6 +74386,89 @@ func (s *DisassociateClientVpnTargetNetworkOutput) SetStatus(v *AssociationStatu return s } +type DisassociateEnclaveCertificateIamRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate from which to disassociate the IAM role. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. 
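The filter names listed above are passed as ordinary ec2.Filter values to DescribeInstanceTypes. A small sketch using two of the documented filters; region and credentials are assumed to come from the environment.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Find current-generation instance types whose instance store is NVMe-backed.
	out, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("current-generation"), Values: []*string{aws.String("true")}},
			{Name: aws.String("instance-storage-info.nvme-support"), Values: []*string{aws.String("required")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range out.InstanceTypes {
		fmt.Println(aws.StringValue(it.InstanceType))
	}
}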
Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ARN of the IAM role to disassociate. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DisassociateEnclaveCertificateIamRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnclaveCertificateIamRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateEnclaveCertificateIamRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateEnclaveCertificateIamRoleInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetCertificateArn(v string) *DisassociateEnclaveCertificateIamRoleInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetDryRun(v bool) *DisassociateEnclaveCertificateIamRoleInput { + s.DryRun = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetRoleArn(v string) *DisassociateEnclaveCertificateIamRoleInput { + s.RoleArn = &v + return s +} + +type DisassociateEnclaveCertificateIamRoleOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateEnclaveCertificateIamRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnclaveCertificateIamRoleOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DisassociateEnclaveCertificateIamRoleOutput) SetReturn(v bool) *DisassociateEnclaveCertificateIamRoleOutput { + s.Return = &v + return s +} + type DisassociateIamInstanceProfileInput struct { _ struct{} `type:"structure"` @@ -74578,7 +75380,7 @@ type EbsInfo struct { // Describes the optimized EBS performance for the instance type. EbsOptimizedInfo *EbsOptimizedInfo `locationName:"ebsOptimizedInfo" type:"structure"` - // Indicates that the instance type is Amazon EBS-optimized. For more information, + // Indicates whether the instance type is Amazon EBS-optimized. For more information, // see Amazon EBS-Optimized Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) // in Amazon EC2 User Guide for Linux Instances. EbsOptimizedSupport *string `locationName:"ebsOptimizedSupport" type:"string" enum:"EbsOptimizedSupport"` @@ -74722,7 +75524,7 @@ type EbsOptimizedInfo struct { BaselineIops *int64 `locationName:"baselineIops" type:"integer"` // The baseline throughput performance for an EBS-optimized instance type, in - // MBps. + // MB/s. 
BaselineThroughputInMBps *float64 `locationName:"baselineThroughputInMBps" type:"double"` // The maximum bandwidth performance for an EBS-optimized instance type, in @@ -74734,7 +75536,7 @@ type EbsOptimizedInfo struct { MaximumIops *int64 `locationName:"maximumIops" type:"integer"` // The maximum throughput performance for an EBS-optimized instance type, in - // MBps. + // MB/s. MaximumThroughputInMBps *float64 `locationName:"maximumThroughputInMBps" type:"double"` } @@ -75874,6 +76676,57 @@ func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutp return s } +// Indicates whether the instance is enabled for AWS Nitro Enclaves. +type EnclaveOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; + // otherwise, it is not enabled for AWS Nitro Enclaves. + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s EnclaveOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnclaveOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *EnclaveOptions) SetEnabled(v bool) *EnclaveOptions { + s.Enabled = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more +// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the AWS Nitro Enclaves User Guide. +type EnclaveOptionsRequest struct { + _ struct{} `type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s EnclaveOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnclaveOptionsRequest) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *EnclaveOptionsRequest) SetEnabled(v bool) *EnclaveOptionsRequest { + s.Enabled = &v + return s +} + // Describes an EC2 Fleet or Spot Fleet event. type EventInformation struct { _ struct{} `type:"structure"` @@ -76869,12 +77722,16 @@ func (s *FailedQueuedPurchaseDeletion) SetReservedInstancesId(v string) *FailedQ return s } -// Describes the IAM SAML identity provider used for federated authentication. +// Describes the IAM SAML identity providers used for federated authentication. type FederatedAuthentication struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the IAM SAML identity provider. SamlProviderArn *string `locationName:"samlProviderArn" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider for the + // self-service portal. + SelfServiceSamlProviderArn *string `locationName:"selfServiceSamlProviderArn" type:"string"` } // String returns the string representation @@ -76893,12 +77750,22 @@ func (s *FederatedAuthentication) SetSamlProviderArn(v string) *FederatedAuthent return s } +// SetSelfServiceSamlProviderArn sets the SelfServiceSamlProviderArn field's value. +func (s *FederatedAuthentication) SetSelfServiceSamlProviderArn(v string) *FederatedAuthentication { + s.SelfServiceSamlProviderArn = &v + return s +} + // The IAM SAML identity provider used for federated authentication. 
type FederatedAuthenticationRequest struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the IAM SAML identity provider. SAMLProviderArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider for the + // self-service portal. + SelfServiceSAMLProviderArn *string `type:"string"` } // String returns the string representation @@ -76917,6 +77784,12 @@ func (s *FederatedAuthenticationRequest) SetSAMLProviderArn(v string) *Federated return s } +// SetSelfServiceSAMLProviderArn sets the SelfServiceSAMLProviderArn field's value. +func (s *FederatedAuthenticationRequest) SetSelfServiceSAMLProviderArn(v string) *FederatedAuthenticationRequest { + s.SelfServiceSAMLProviderArn = &v + return s +} + // A filter name and value pair that is used to return a more specific list // of results from a describe operation. Filters can be used to match a set // of resources by specific criteria, such as tags, attributes, or IDs. The @@ -77584,6 +78457,124 @@ func (s *FleetLaunchTemplateSpecificationRequest) SetVersion(v string) *FleetLau return s } +// The strategy to use when Amazon EC2 emits a signal that your Spot Instance +// is at an elevated risk of being interrupted. +type FleetSpotCapacityRebalance struct { + _ struct{} `type:"structure"` + + // To allow EC2 Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for an existing Spot Instance in the fleet, + // specify launch. Only available for fleets of type maintain. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can leave it + // running. You are charged for both instances while they are running. + ReplacementStrategy *string `locationName:"replacementStrategy" type:"string" enum:"FleetReplacementStrategy"` +} + +// String returns the string representation +func (s FleetSpotCapacityRebalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotCapacityRebalance) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *FleetSpotCapacityRebalance) SetReplacementStrategy(v string) *FleetSpotCapacityRebalance { + s.ReplacementStrategy = &v + return s +} + +// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal +// that your Spot Instance is at an elevated risk of being interrupted. For +// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-capacity-rebalance) +// in the Amazon Elastic Compute Cloud User Guide. +type FleetSpotCapacityRebalanceRequest struct { + _ struct{} `type:"structure"` + + // The replacement strategy to use. Only available for fleets of type maintain. + // + // To allow EC2 Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for an existing Spot Instance in the fleet, + // specify launch. You must specify a value, otherwise you get an error. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can leave it + // running. You are charged for all instances while they are running. 
+ ReplacementStrategy *string `type:"string" enum:"FleetReplacementStrategy"` +} + +// String returns the string representation +func (s FleetSpotCapacityRebalanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotCapacityRebalanceRequest) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *FleetSpotCapacityRebalanceRequest) SetReplacementStrategy(v string) *FleetSpotCapacityRebalanceRequest { + s.ReplacementStrategy = &v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type FleetSpotMaintenanceStrategies struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *FleetSpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` +} + +// String returns the string representation +func (s FleetSpotMaintenanceStrategies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotMaintenanceStrategies) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *FleetSpotMaintenanceStrategies) SetCapacityRebalance(v *FleetSpotCapacityRebalance) *FleetSpotMaintenanceStrategies { + s.CapacityRebalance = v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type FleetSpotMaintenanceStrategiesRequest struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *FleetSpotCapacityRebalanceRequest `type:"structure"` +} + +// String returns the string representation +func (s FleetSpotMaintenanceStrategiesRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotMaintenanceStrategiesRequest) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *FleetSpotMaintenanceStrategiesRequest) SetCapacityRebalance(v *FleetSpotCapacityRebalanceRequest) *FleetSpotMaintenanceStrategiesRequest { + s.CapacityRebalance = v + return s +} + // Describes a flow log. type FlowLog struct { _ struct{} `type:"structure"` @@ -77798,7 +78789,7 @@ func (s *FpgaDeviceInfo) SetName(v string) *FpgaDeviceInfo { type FpgaDeviceMemoryInfo struct { _ struct{} `type:"structure"` - // The size (in MiB) for the memory available to the FPGA accelerator. + // The size of the memory available to the FPGA accelerator, in MiB. SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` } @@ -78103,6 +79094,78 @@ func (s *FpgaInfo) SetTotalFpgaMemoryInMiB(v int64) *FpgaInfo { return s } +type GetAssociatedEnclaveCertificateIamRolesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate for which to view the associated IAM roles, + // encryption keys, and Amazon S3 object information. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. 
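The FleetSpotCapacityRebalance* types above are attached to a fleet through its Spot options. The wiring point (SpotOptionsRequest.MaintenanceStrategies) is not part of this hunk, so the sketch below is an assumption about where the new request type plugs in, not a confirmed call path; it only constructs and prints the input.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumption: the maintenance strategies hang off the fleet's Spot options
	// via SpotOptionsRequest.MaintenanceStrategies, which is not shown in this hunk.
	input := &ec2.CreateFleetInput{
		Type: aws.String("maintain"), // capacity rebalancing is only available for maintain fleets
		SpotOptions: &ec2.SpotOptionsRequest{
			MaintenanceStrategies: &ec2.FleetSpotMaintenanceStrategiesRequest{
				CapacityRebalance: &ec2.FleetSpotCapacityRebalanceRequest{
					// "launch" starts a replacement Spot Instance when a rebalance
					// notification is emitted; the marked instance keeps running
					// until you terminate it (see the doc comment above).
					ReplacementStrategy: aws.String("launch"),
				},
			},
		},
	}
	fmt.Println(input)
}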
+ DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAssociatedEnclaveCertificateIamRolesInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) SetCertificateArn(v string) *GetAssociatedEnclaveCertificateIamRolesInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) SetDryRun(v bool) *GetAssociatedEnclaveCertificateIamRolesInput { + s.DryRun = &v + return s +} + +type GetAssociatedEnclaveCertificateIamRolesOutput struct { + _ struct{} `type:"structure"` + + // Information about the associated IAM roles. + AssociatedRoles []*AssociatedRole `locationName:"associatedRoleSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesOutput) GoString() string { + return s.String() +} + +// SetAssociatedRoles sets the AssociatedRoles field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesOutput) SetAssociatedRoles(v []*AssociatedRole) *GetAssociatedEnclaveCertificateIamRolesOutput { + s.AssociatedRoles = v + return s +} + type GetAssociatedIpv6PoolCidrsInput struct { _ struct{} `type:"structure"` @@ -80306,7 +81369,7 @@ func (s *GpuDeviceInfo) SetName(v string) *GpuDeviceInfo { type GpuDeviceMemoryInfo struct { _ struct{} `type:"structure"` - // The size (in MiB) for the memory available to the GPU accelerator. + // The size of the memory available to the GPU accelerator, in MiB. SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` } @@ -80333,7 +81396,8 @@ type GpuInfo struct { // Describes the GPU accelerators for the instance type. Gpus []*GpuDeviceInfo `locationName:"gpus" locationNameList:"item" type:"list"` - // The total size of the memory for the GPU accelerators for the instance type. + // The total size of the memory for the GPU accelerators for the instance type, + // in MiB. TotalGpuMemoryInMiB *int64 `locationName:"totalGpuMemoryInMiB" type:"integer"` } @@ -83269,6 +84333,9 @@ type Instance struct { // Specifies whether enhanced networking with ENA is enabled. EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. + EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` + // Indicates whether the instance is enabled for hibernation. HibernationOptions *HibernationOptions `locationName:"hibernationOptions" type:"structure"` @@ -83476,6 +84543,12 @@ func (s *Instance) SetEnaSupport(v bool) *Instance { return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. 
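The enclave-certificate calls above follow the usual request/response shape. A minimal sketch, assuming the generated operation wrapper matches the shape names, that lists the IAM roles associated with an ACM certificate; the certificate ARN is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.GetAssociatedEnclaveCertificateIamRoles(&ec2.GetAssociatedEnclaveCertificateIamRolesInput{
		CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// AssociatedRoles carries the role ARNs plus the S3 object and encryption-key
	// details the documentation above refers to.
	for _, role := range out.AssociatedRoles {
		fmt.Println(role)
	}
}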
+func (s *Instance) SetEnclaveOptions(v *EnclaveOptions) *Instance { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *Instance) SetHibernationOptions(v *HibernationOptions) *Instance { s.HibernationOptions = v @@ -84475,6 +85548,9 @@ type InstanceNetworkInterfaceAttachment struct { // The index of the device on the instance for the network interface attachment. DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + // The attachment state. Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` } @@ -84513,6 +85589,12 @@ func (s *InstanceNetworkInterfaceAttachment) SetDeviceIndex(v int64) *InstanceNe return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *InstanceNetworkInterfaceAttachment) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceAttachment { + s.NetworkCardIndex = &v + return s +} + // SetStatus sets the Status field's value. func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetworkInterfaceAttachment { s.Status = &v @@ -84557,8 +85639,10 @@ type InstanceNetworkInterfaceSpecification struct { // creating a network interface when launching an instance. Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // The type of network interface. To create an Elastic Fabric Adapter (EFA), - // specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // The type of network interface. + // + // To create an Elastic Fabric Adapter (EFA), specify efa. For more information, + // see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) // in the Amazon Elastic Compute Cloud User Guide. // // If you are not creating an EFA, specify interface or omit this parameter. @@ -84579,6 +85663,11 @@ type InstanceNetworkInterfaceSpecification struct { // number of instances to launch. Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" queryName:"Ipv6Addresses" locationNameList:"item" type:"list"` + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + // The ID of the network interface. // // If you are creating a Spot Fleet, omit this parameter because you can’t @@ -84674,6 +85763,12 @@ func (s *InstanceNetworkInterfaceSpecification) SetIpv6Addresses(v []*InstanceIp return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *InstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceSpecification { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *InstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *InstanceNetworkInterfaceSpecification { s.NetworkInterfaceId = &v @@ -85124,9 +86219,13 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { type InstanceStorageInfo struct { _ struct{} `type:"structure"` - // Array describing the disks that are available for the instance type. + // Describes the disks that are available for the instance type. 
Disks []*DiskInfo `locationName:"disks" locationNameList:"item" type:"list"` + // Indicates whether non-volatile memory express (NVMe) is supported for instance + // store. + NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EphemeralNvmeSupport"` + // The total size of the disks, in GB. TotalSizeInGB *int64 `locationName:"totalSizeInGB" type:"long"` } @@ -85147,6 +86246,12 @@ func (s *InstanceStorageInfo) SetDisks(v []*DiskInfo) *InstanceStorageInfo { return s } +// SetNvmeSupport sets the NvmeSupport field's value. +func (s *InstanceStorageInfo) SetNvmeSupport(v string) *InstanceStorageInfo { + s.NvmeSupport = &v + return s +} + // SetTotalSizeInGB sets the TotalSizeInGB field's value. func (s *InstanceStorageInfo) SetTotalSizeInGB(v int64) *InstanceStorageInfo { s.TotalSizeInGB = &v @@ -85195,13 +86300,13 @@ type InstanceTypeInfo struct { // Indicates whether auto recovery is supported. AutoRecoverySupported *bool `locationName:"autoRecoverySupported" type:"boolean"` - // Indicates whether the instance is bare metal. + // Indicates whether the instance is a bare metal instance type. BareMetal *bool `locationName:"bareMetal" type:"boolean"` // Indicates whether the instance type is a burstable performance instance type. BurstablePerformanceSupported *bool `locationName:"burstablePerformanceSupported" type:"boolean"` - // Indicates whether the instance type is a current generation. + // Indicates whether the instance type is current generation. CurrentGeneration *bool `locationName:"currentGeneration" type:"boolean"` // Indicates whether Dedicated Hosts are supported on the instance type. @@ -85222,13 +86327,13 @@ type InstanceTypeInfo struct { // Indicates whether On-Demand hibernation is supported. HibernationSupported *bool `locationName:"hibernationSupported" type:"boolean"` - // Indicates the hypervisor used for the instance type. + // The hypervisor for the instance type. Hypervisor *string `locationName:"hypervisor" type:"string" enum:"InstanceTypeHypervisor"` // Describes the Inference accelerator settings for the instance type. InferenceAcceleratorInfo *InferenceAcceleratorInfo `locationName:"inferenceAcceleratorInfo" type:"structure"` - // Describes the disks for the instance type. + // Describes the instance storage for the instance type. InstanceStorageInfo *InstanceStorageInfo `locationName:"instanceStorageInfo" type:"structure"` // Indicates whether instance storage is supported. @@ -85250,7 +86355,7 @@ type InstanceTypeInfo struct { // Describes the processor. ProcessorInfo *ProcessorInfo `locationName:"processorInfo" type:"structure"` - // Indicates the supported root device types. + // The supported root device types. SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list"` // Indicates whether the instance type is offered for spot or On-Demand. @@ -86847,6 +87952,57 @@ func (s *LaunchTemplateElasticInferenceAcceleratorResponse) SetType(v string) *L return s } +// Indicates whether the instance is enabled for AWS Nitro Enclaves. +type LaunchTemplateEnclaveOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; + // otherwise, it is not enabled for AWS Nitro Enclaves. 
+ Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateEnclaveOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEnclaveOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LaunchTemplateEnclaveOptions) SetEnabled(v bool) *LaunchTemplateEnclaveOptions { + s.Enabled = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more +// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the AWS Nitro Enclaves User Guide. +type LaunchTemplateEnclaveOptionsRequest struct { + _ struct{} `type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateEnclaveOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEnclaveOptionsRequest) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LaunchTemplateEnclaveOptionsRequest) SetEnabled(v bool) *LaunchTemplateEnclaveOptionsRequest { + s.Enabled = &v + return s +} + // Indicates whether an instance is configured for hibernation. type LaunchTemplateHibernationOptions struct { _ struct{} `type:"structure"` @@ -87213,6 +88369,9 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { // The IPv6 addresses for the network interface. Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"` + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + // The ID of the network interface. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` @@ -87293,6 +88452,12 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6Addresses(v return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { s.NetworkInterfaceId = &v @@ -87368,6 +88533,11 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // subnet. You can't use this option if you're specifying a number of IPv6 addresses. Ipv6Addresses []*InstanceIpv6AddressRequest `locationNameList:"InstanceIpv6Address" type:"list"` + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + // The ID of the network interface. NetworkInterfaceId *string `type:"string"` @@ -87448,6 +88618,12 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6Addr return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. 
+func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkInterfaceId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { s.NetworkInterfaceId = &v @@ -89020,7 +90196,7 @@ func (s *ManagedPrefixList) SetVersion(v int64) *ManagedPrefixList { type MemoryInfo struct { _ struct{} `type:"structure"` - // Size of the memory, in MiB. + // The size of the memory, in MiB. SizeInMiB *int64 `locationName:"sizeInMiB" type:"long"` } @@ -89251,6 +90427,9 @@ func (s *ModifyCapacityReservationOutput) SetReturn(v bool) *ModifyCapacityReser type ModifyClientVpnEndpointInput struct { _ struct{} `type:"structure"` + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectOptions `type:"structure"` + // The ID of the Client VPN endpoint to modify. // // ClientVpnEndpointId is a required field @@ -89286,6 +90465,9 @@ type ModifyClientVpnEndpointInput struct { // The IDs of one or more security groups to apply to the target network. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + // Specify whether to enable the self-service portal for the Client VPN endpoint. + SelfServicePortal *string `type:"string" enum:"SelfServicePortal"` + // The ARN of the server certificate to be used. The server certificate must // be provisioned in AWS Certificate Manager (ACM). ServerCertificateArn *string `type:"string"` @@ -89331,6 +90513,12 @@ func (s *ModifyClientVpnEndpointInput) Validate() error { return nil } +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *ModifyClientVpnEndpointInput) SetClientConnectOptions(v *ClientConnectOptions) *ModifyClientVpnEndpointInput { + s.ClientConnectOptions = v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *ModifyClientVpnEndpointInput) SetClientVpnEndpointId(v string) *ModifyClientVpnEndpointInput { s.ClientVpnEndpointId = &v @@ -89367,6 +90555,12 @@ func (s *ModifyClientVpnEndpointInput) SetSecurityGroupIds(v []*string) *ModifyC return s } +// SetSelfServicePortal sets the SelfServicePortal field's value. +func (s *ModifyClientVpnEndpointInput) SetSelfServicePortal(v string) *ModifyClientVpnEndpointInput { + s.SelfServicePortal = &v + return s +} + // SetServerCertificateArn sets the ServerCertificateArn field's value. func (s *ModifyClientVpnEndpointInput) SetServerCertificateArn(v string) *ModifyClientVpnEndpointInput { s.ServerCertificateArn = &v @@ -89519,11 +90713,11 @@ type ModifyEbsDefaultKmsKeyIdInput struct { // // You can specify the CMK using any of the following: // - // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // // * Key alias. For example, alias/ExampleAlias. // - // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. 
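ModifyClientVpnEndpointInput now accepts the self-service portal setting and client connect options shown above. A short sketch that only toggles the portal; the endpoint ID is a placeholder, "enabled" is the assumed enum value (the enum values are not part of this hunk), and ClientConnectOptions is omitted because its fields are not shown here.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyClientVpnEndpoint(&ec2.ModifyClientVpnEndpointInput{
		ClientVpnEndpointId: aws.String("cvpn-endpoint-0123456789abcdef0"), // placeholder
		SelfServicePortal:   aws.String("enabled"),                         // assumed enum value
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}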
// @@ -89618,9 +90812,7 @@ type ModifyFleetInput struct { LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` // The size of the EC2 Fleet. - // - // TargetCapacitySpecification is a required field - TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure" required:"true"` + TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure"` } // String returns the string representation @@ -89639,9 +90831,6 @@ func (s *ModifyFleetInput) Validate() error { if s.FleetId == nil { invalidParams.Add(request.NewErrParamRequired("FleetId")) } - if s.TargetCapacitySpecification == nil { - invalidParams.Add(request.NewErrParamRequired("TargetCapacitySpecification")) - } if s.LaunchTemplateConfigs != nil { for i, v := range s.LaunchTemplateConfigs { if v == nil { @@ -92600,6 +93789,11 @@ func (s *ModifyTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment type ModifyTransitGatewayVpcAttachmentRequestOptions struct { _ struct{} `type:"structure"` + // Enable or disable support for appliance mode. If enabled, a traffic flow + // between a source and destination uses the same Availability Zone for the + // VPC attachment for the lifetime of that flow. The default is disable. + ApplianceModeSupport *string `type:"string" enum:"ApplianceModeSupportValue"` + // Enable or disable DNS support. The default is enable. DnsSupport *string `type:"string" enum:"DnsSupportValue"` @@ -92617,6 +93811,12 @@ func (s ModifyTransitGatewayVpcAttachmentRequestOptions) GoString() string { return s.String() } +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetApplianceModeSupport(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { + s.ApplianceModeSupport = &v + return s +} + // SetDnsSupport sets the DnsSupport field's value. func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetDnsSupport(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { s.DnsSupport = &v @@ -92998,7 +94198,9 @@ type ModifyVpcEndpointInput struct { // network interface. AddSecurityGroupIds []*string `locationName:"AddSecurityGroupId" locationNameList:"item" type:"list"` - // (Interface endpoint) One or more subnet IDs in which to serve the endpoint. + // (Interface and Gateway Load Balancer endpoints) One or more subnet IDs in + // which to serve the endpoint. For a Gateway Load Balancer endpoint, you can + // specify only one subnet. AddSubnetIds []*string `locationName:"AddSubnetId" locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without @@ -93007,8 +94209,8 @@ type ModifyVpcEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // A policy to attach to the endpoint that controls access to the service. The - // policy must be in valid JSON format. + // (Interface and gateway endpoints) A policy to attach to the endpoint that + // controls access to the service. The policy must be in valid JSON format. PolicyDocument *string `type:"string"` // (Interface endpoint) Indicates whether a private hosted zone is associated @@ -93154,6 +94356,10 @@ type ModifyVpcEndpointServiceConfigurationInput struct { // accepted. AcceptanceRequired *bool `type:"boolean"` + // The Amazon Resource Names (ARNs) of Gateway Load Balancers to add to your + // service configuration. 
+ AddGatewayLoadBalancerArns []*string `locationName:"AddGatewayLoadBalancerArn" locationNameList:"item" type:"list"` + // The Amazon Resource Names (ARNs) of Network Load Balancers to add to your // service configuration. AddNetworkLoadBalancerArns []*string `locationName:"AddNetworkLoadBalancerArn" locationNameList:"item" type:"list"` @@ -93164,14 +94370,20 @@ type ModifyVpcEndpointServiceConfigurationInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The private DNS name to assign to the endpoint service. + // (Interface endpoint configuration) The private DNS name to assign to the + // endpoint service. PrivateDnsName *string `type:"string"` + // The Amazon Resource Names (ARNs) of Gateway Load Balancers to remove from + // your service configuration. + RemoveGatewayLoadBalancerArns []*string `locationName:"RemoveGatewayLoadBalancerArn" locationNameList:"item" type:"list"` + // The Amazon Resource Names (ARNs) of Network Load Balancers to remove from // your service configuration. RemoveNetworkLoadBalancerArns []*string `locationName:"RemoveNetworkLoadBalancerArn" locationNameList:"item" type:"list"` - // Removes the private DNS name of the endpoint service. + // (Interface endpoint configuration) Removes the private DNS name of the endpoint + // service. RemovePrivateDnsName *bool `type:"boolean"` // The ID of the service. @@ -93209,6 +94421,12 @@ func (s *ModifyVpcEndpointServiceConfigurationInput) SetAcceptanceRequired(v boo return s } +// SetAddGatewayLoadBalancerArns sets the AddGatewayLoadBalancerArns field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetAddGatewayLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { + s.AddGatewayLoadBalancerArns = v + return s +} + // SetAddNetworkLoadBalancerArns sets the AddNetworkLoadBalancerArns field's value. func (s *ModifyVpcEndpointServiceConfigurationInput) SetAddNetworkLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { s.AddNetworkLoadBalancerArns = v @@ -93227,6 +94445,12 @@ func (s *ModifyVpcEndpointServiceConfigurationInput) SetPrivateDnsName(v string) return s } +// SetRemoveGatewayLoadBalancerArns sets the RemoveGatewayLoadBalancerArns field's value. +func (s *ModifyVpcEndpointServiceConfigurationInput) SetRemoveGatewayLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { + s.RemoveGatewayLoadBalancerArns = v + return s +} + // SetRemoveNetworkLoadBalancerArns sets the RemoveNetworkLoadBalancerArns field's value. func (s *ModifyVpcEndpointServiceConfigurationInput) SetRemoveNetworkLoadBalancerArns(v []*string) *ModifyVpcEndpointServiceConfigurationInput { s.RemoveNetworkLoadBalancerArns = v @@ -94846,10 +96070,55 @@ func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry { return s } +// Describes the network card support of the instance type. +type NetworkCardInfo struct { + _ struct{} `type:"structure"` + + // The maximum number of network interfaces for the network card. + MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + + // The network performance of the network card. 
+ NetworkPerformance *string `locationName:"networkPerformance" type:"string"` +} + +// String returns the string representation +func (s NetworkCardInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkCardInfo) GoString() string { + return s.String() +} + +// SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. +func (s *NetworkCardInfo) SetMaximumNetworkInterfaces(v int64) *NetworkCardInfo { + s.MaximumNetworkInterfaces = &v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *NetworkCardInfo) SetNetworkCardIndex(v int64) *NetworkCardInfo { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkPerformance sets the NetworkPerformance field's value. +func (s *NetworkCardInfo) SetNetworkPerformance(v string) *NetworkCardInfo { + s.NetworkPerformance = &v + return s +} + // Describes the networking features of the instance type. type NetworkInfo struct { _ struct{} `type:"structure"` + // The index of the default network card, starting at 0. + DefaultNetworkCardIndex *int64 `locationName:"defaultNetworkCardIndex" type:"integer"` + // Indicates whether Elastic Fabric Adapter (EFA) is supported. EfaSupported *bool `locationName:"efaSupported" type:"boolean"` @@ -94865,10 +96134,17 @@ type NetworkInfo struct { // Indicates whether IPv6 is supported. Ipv6Supported *bool `locationName:"ipv6Supported" type:"boolean"` + // The maximum number of physical network cards that can be allocated to the + // instance. + MaximumNetworkCards *int64 `locationName:"maximumNetworkCards" type:"integer"` + // The maximum number of network interfaces for the instance type. MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` - // Describes the network performance. + // Describes the network cards for the instance type. + NetworkCards []*NetworkCardInfo `locationName:"networkCards" locationNameList:"item" type:"list"` + + // The network performance. NetworkPerformance *string `locationName:"networkPerformance" type:"string"` } @@ -94882,6 +96158,12 @@ func (s NetworkInfo) GoString() string { return s.String() } +// SetDefaultNetworkCardIndex sets the DefaultNetworkCardIndex field's value. +func (s *NetworkInfo) SetDefaultNetworkCardIndex(v int64) *NetworkInfo { + s.DefaultNetworkCardIndex = &v + return s +} + // SetEfaSupported sets the EfaSupported field's value. func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo { s.EfaSupported = &v @@ -94912,12 +96194,24 @@ func (s *NetworkInfo) SetIpv6Supported(v bool) *NetworkInfo { return s } +// SetMaximumNetworkCards sets the MaximumNetworkCards field's value. +func (s *NetworkInfo) SetMaximumNetworkCards(v int64) *NetworkInfo { + s.MaximumNetworkCards = &v + return s +} + // SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. func (s *NetworkInfo) SetMaximumNetworkInterfaces(v int64) *NetworkInfo { s.MaximumNetworkInterfaces = &v return s } +// SetNetworkCards sets the NetworkCards field's value. +func (s *NetworkInfo) SetNetworkCards(v []*NetworkCardInfo) *NetworkInfo { + s.NetworkCards = v + return s +} + // SetNetworkPerformance sets the NetworkPerformance field's value. func (s *NetworkInfo) SetNetworkPerformance(v string) *NetworkInfo { s.NetworkPerformance = &v @@ -95157,8 +96451,7 @@ type NetworkInterfaceAssociation struct { // The public DNS name. 
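NetworkCardIndex appears both on the instance-type description (NetworkCardInfo above) and on the network-interface specification used at launch. A rough sketch that pins two interfaces to separate network cards; the AMI, subnet, and instance type are placeholders, and the chosen type must actually expose more than one network card.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"),
		InstanceType: aws.String("p4d.24xlarge"), // placeholder for a multi-card instance type
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
		NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
			{
				// The primary network interface must sit on network card index 0.
				DeviceIndex:      aws.Int64(0),
				NetworkCardIndex: aws.Int64(0),
				SubnetId:         aws.String("subnet-0123456789abcdef0"),
			},
			{
				DeviceIndex:      aws.Int64(1),
				NetworkCardIndex: aws.Int64(1),
				SubnetId:         aws.String("subnet-0123456789abcdef0"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Instances[0].InstanceId))
}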
PublicDnsName *string `locationName:"publicDnsName" type:"string"` - // The address of the Elastic IP address or Carrier IP address bound to the - // network interface. + // The address of the Elastic IP address bound to the network interface. PublicIp *string `locationName:"publicIp" type:"string"` } @@ -95236,6 +96529,9 @@ type NetworkInterfaceAttachment struct { // The AWS account ID of the owner of the instance. InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + // The attachment state. Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` } @@ -95286,6 +96582,12 @@ func (s *NetworkInterfaceAttachment) SetInstanceOwnerId(v string) *NetworkInterf return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *NetworkInterfaceAttachment) SetNetworkCardIndex(v int64) *NetworkInterfaceAttachment { + s.NetworkCardIndex = &v + return s +} + // SetStatus sets the Status field's value. func (s *NetworkInterfaceAttachment) SetStatus(v string) *NetworkInterfaceAttachment { s.Status = &v @@ -96386,7 +97688,7 @@ func (s *PlacementGroup) SetTags(v []*Tag) *PlacementGroup { type PlacementGroupInfo struct { _ struct{} `type:"structure"` - // A list of supported placement groups types. + // The supported placement group types. SupportedStrategies []*string `locationName:"supportedStrategies" locationNameList:"item" type:"list"` } @@ -96901,7 +98203,7 @@ func (s *PrivateIpAddressSpecification) SetPrivateIpAddress(v string) *PrivateIp type ProcessorInfo struct { _ struct{} `type:"structure"` - // A list of architectures supported by the instance type. + // The architectures supported by the instance type. SupportedArchitectures []*string `locationName:"supportedArchitectures" locationNameList:"item" type:"list"` // The speed of the processor, in GHz. @@ -99277,6 +100579,9 @@ type ReplaceRouteInput struct { // The ID of a transit gateway. TransitGatewayId *string `type:"string"` + // The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only. + VpcEndpointId *string `type:"string"` + // The ID of a VPC peering connection. VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` } @@ -99388,6 +100693,12 @@ func (s *ReplaceRouteInput) SetTransitGatewayId(v string) *ReplaceRouteInput { return s } +// SetVpcEndpointId sets the VpcEndpointId field's value. +func (s *ReplaceRouteInput) SetVpcEndpointId(v string) *ReplaceRouteInput { + s.VpcEndpointId = &v + return s +} + // SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. func (s *ReplaceRouteInput) SetVpcPeeringConnectionId(v string) *ReplaceRouteInput { s.VpcPeeringConnectionId = &v @@ -99792,6 +101103,13 @@ type RequestLaunchTemplateData struct { // The elastic inference accelerator for the instance. ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more + // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the AWS Nitro Enclaves User Guide. + // + // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + EnclaveOptions *LaunchTemplateEnclaveOptionsRequest `type:"structure"` + // Indicates whether an instance is enabled for hibernation. 
This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) @@ -99973,6 +101291,12 @@ func (s *RequestLaunchTemplateData) SetElasticInferenceAccelerators(v []*LaunchT return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *RequestLaunchTemplateData) SetEnclaveOptions(v *LaunchTemplateEnclaveOptionsRequest) *RequestLaunchTemplateData { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *RequestLaunchTemplateData) SetHibernationOptions(v *LaunchTemplateHibernationOptionsRequest) *RequestLaunchTemplateData { s.HibernationOptions = v @@ -100197,6 +101521,9 @@ type RequestSpotInstancesInput struct { // // You can't specify an Availability Zone group or a launch group if you specify // a duration. + // + // New accounts or accounts with no previous billing history with AWS are not + // eligible for Spot Instances with a defined duration (also known as Spot blocks). BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -100253,11 +101580,16 @@ type RequestSpotInstancesInput struct { // date and time. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` - // The end date of the request. If this is a one-time request, the request remains - // active until all instances launch, the request is canceled, or this date - // is reached. If the request is persistent, it remains active until it is canceled - // or this date is reached. The default end date is 7 days from the current - // date. + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // + // * For a persistent request, the request remains active until the ValidUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, the request remains active until all instances + // launch, the request is canceled, or the ValidUntil date and time is reached. + // By default, the request is valid for 7 days from the date the request + // was created. ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } @@ -100579,7 +101911,9 @@ func (s *RequestSpotLaunchSpecification) SetUserData(v string) *RequestSpotLaunc return s } -// Describes a reservation. +// Describes a launch request for one or more instances, and includes owner, +// requester, and security group information that applies to all instances in +// the launch request. type Reservation struct { _ struct{} `type:"structure"` @@ -101925,6 +103259,9 @@ type ResponseLaunchTemplateData struct { // The elastic inference accelerator for the instance. ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAcceleratorResponse `locationName:"elasticInferenceAcceleratorSet" locationNameList:"item" type:"list"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. + EnclaveOptions *LaunchTemplateEnclaveOptions `locationName:"enclaveOptions" type:"structure"` + // Indicates whether an instance is configured for hibernation. For more information, // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. 
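// Minimal usage sketch for the new revoke outputs above: RevokeSecurityGroupIngressOutput
// now carries Return and UnknownIpPermissions, so a caller can see which submitted rules
// the service did not recognize. The group ID, CIDR and ports are placeholders, and the
// input field names are the standard aws-sdk-go ones rather than anything defined in this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Revoke a hypothetical inbound rule.
	out, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
		GroupId:    aws.String("sg-0123456789abcdef0"),
		IpProtocol: aws.String("tcp"),
		FromPort:   aws.Int64(443),
		ToPort:     aws.Int64(443),
		CidrIp:     aws.String("203.0.113.0/24"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The new output fields report success and any rules unknown to the service.
	fmt.Println("return:", aws.BoolValue(out.Return))
	for _, perm := range out.UnknownIpPermissions {
		fmt.Println("unknown rule:", perm)
	}
}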
@@ -102043,6 +103380,12 @@ func (s *ResponseLaunchTemplateData) SetElasticInferenceAccelerators(v []*Launch return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *ResponseLaunchTemplateData) SetEnclaveOptions(v *LaunchTemplateEnclaveOptions) *ResponseLaunchTemplateData { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *ResponseLaunchTemplateData) SetHibernationOptions(v *LaunchTemplateHibernationOptions) *ResponseLaunchTemplateData { s.HibernationOptions = v @@ -102559,6 +103902,13 @@ func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroup type RevokeSecurityGroupEgressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The outbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -102571,6 +103921,18 @@ func (s RevokeSecurityGroupEgressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupEgressOutput) SetReturn(v bool) *RevokeSecurityGroupEgressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupEgressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressOutput { + s.UnknownIpPermissions = v + return s +} + type RevokeSecurityGroupIngressInput struct { _ struct{} `type:"structure"` @@ -102698,6 +104060,13 @@ func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGrou type RevokeSecurityGroupIngressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The inbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -102710,6 +104079,18 @@ func (s RevokeSecurityGroupIngressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupIngressOutput) SetReturn(v bool) *RevokeSecurityGroupIngressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupIngressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressOutput { + s.UnknownIpPermissions = v + return s +} + // Describes a route in a route table. type Route struct { _ struct{} `type:"structure"` @@ -103125,9 +104506,18 @@ type RunInstancesInput struct { // You cannot specify accelerators from different generations in the same request. ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more + // information, see What is AWS Nitro Enclaves? 
(https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the AWS Nitro Enclaves User Guide. + // + // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + EnclaveOptions *EnclaveOptionsRequest `type:"structure"` + // Indicates whether an instance is enabled for hibernation. For more information, // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. + // + // You can't enable hibernation and AWS Nitro Enclaves on the same instance. HibernationOptions *HibernationOptionsRequest `type:"structure"` // The IAM instance profile. @@ -103416,6 +104806,12 @@ func (s *RunInstancesInput) SetElasticInferenceAccelerators(v []*ElasticInferenc return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *RunInstancesInput) SetEnclaveOptions(v *EnclaveOptionsRequest) *RunInstancesInput { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *RunInstancesInput) SetHibernationOptions(v *HibernationOptionsRequest) *RunInstancesInput { s.HibernationOptions = v @@ -105417,6 +106813,9 @@ type ServiceConfiguration struct { // The DNS names for the service. BaseEndpointDnsNames []*string `locationName:"baseEndpointDnsNameSet" locationNameList:"item" type:"list"` + // The Amazon Resource Names (ARNs) of the Gateway Load Balancers for the service. + GatewayLoadBalancerArns []*string `locationName:"gatewayLoadBalancerArnSet" locationNameList:"item" type:"list"` + // Indicates whether the service manages its VPC endpoints. Management of the // service VPC endpoints using the VPC endpoint API is restricted. ManagesVpcEndpoints *bool `locationName:"managesVpcEndpoints" type:"boolean"` @@ -105474,6 +106873,12 @@ func (s *ServiceConfiguration) SetBaseEndpointDnsNames(v []*string) *ServiceConf return s } +// SetGatewayLoadBalancerArns sets the GatewayLoadBalancerArns field's value. +func (s *ServiceConfiguration) SetGatewayLoadBalancerArns(v []*string) *ServiceConfiguration { + s.GatewayLoadBalancerArns = v + return s +} + // SetManagesVpcEndpoints sets the ManagesVpcEndpoints field's value. func (s *ServiceConfiguration) SetManagesVpcEndpoints(v bool) *ServiceConfiguration { s.ManagesVpcEndpoints = &v @@ -105792,10 +107197,8 @@ type Snapshot struct { // key for the parent volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` - // The AWS owner alias, as maintained by Amazon. The possible values are: amazon - // | self | all | aws-marketplace | microsoft. This AWS owner alias is not to - // be confused with the user-configured AWS account alias, which is set from - // the IAM console. + // The AWS owner alias, from an Amazon-maintained list (amazon). This is not + // the user-configured AWS account alias set using the IAM console. OwnerAlias *string `locationName:"ownerAlias" type:"string"` // The AWS account ID of the EBS snapshot owner. @@ -106308,11 +107711,47 @@ func (s *SnapshotTaskDetail) SetUserBucket(v *UserBucketDetails) *SnapshotTaskDe return s } +// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal +// that your Spot Instance is at an elevated risk of being interrupted. For +// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#spot-fleet-capacity-rebalance) +// in the Amazon EC2 User Guide for Linux Instances. 
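// Minimal usage sketch for the new EnclaveOptions field on RunInstancesInput, which is
// mutually exclusive with hibernation, so a request enables one or the other. The AMI ID
// is a placeholder and the Enabled field on EnclaveOptionsRequest is assumed from the
// wider SDK; it is not spelled out in this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	input := &ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI
		InstanceType: aws.String(ec2.InstanceTypeM5Xlarge),
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
	}

	// Enable AWS Nitro Enclaves; HibernationOptions must stay unset because
	// the two features cannot be enabled on the same instance.
	input.SetEnclaveOptions(&ec2.EnclaveOptionsRequest{
		Enabled: aws.Bool(true), // assumed field name
	})

	reservation, err := svc.RunInstances(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reservation:", aws.StringValue(reservation.ReservationId))
}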
+type SpotCapacityRebalance struct { + _ struct{} `type:"structure"` + + // The replacement strategy to use. Only available for fleets of type maintain. + // You must specify a value, otherwise you get an error. + // + // To allow Spot Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for a Spot Instance in the fleet, specify + // launch. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can leave it + // running. You are charged for all instances while they are running. + ReplacementStrategy *string `locationName:"replacementStrategy" type:"string" enum:"ReplacementStrategy"` +} + +// String returns the string representation +func (s SpotCapacityRebalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotCapacityRebalance) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *SpotCapacityRebalance) SetReplacementStrategy(v string) *SpotCapacityRebalance { + s.ReplacementStrategy = &v + return s +} + // Describes the data feed for a Spot Instance. type SpotDatafeedSubscription struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket where the Spot Instance data feed is located. + // The name of the Amazon S3 bucket where the Spot Instance data feed is located. Bucket *string `locationName:"bucket" type:"string"` // The fault codes for the Spot Instance request, if any. @@ -106321,7 +107760,7 @@ type SpotDatafeedSubscription struct { // The AWS account ID of the account. OwnerId *string `locationName:"ownerId" type:"string"` - // The prefix that is prepended to data feed files. + // The prefix for the data feed files. Prefix *string `locationName:"prefix" type:"string"` // The state of the Spot Instance data feed subscription. @@ -106369,8 +107808,9 @@ func (s *SpotDatafeedSubscription) SetState(v string) *SpotDatafeedSubscription } // Describes the launch specification for one or more Spot Instances. If you -// include On-Demand capacity in your fleet request, you can't use SpotFleetLaunchSpecification; -// you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). +// include On-Demand capacity in your fleet request or want to specify an EFA +// network device, you can't use SpotFleetLaunchSpecification; you must use +// LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). type SpotFleetLaunchSpecification struct { _ struct{} `type:"structure"` @@ -106413,6 +107853,9 @@ type SpotFleetLaunchSpecification struct { // One or more network interfaces. If you specify a network interface, you must // specify subnet IDs and security group IDs using the network interface. + // + // SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter + // (EFA). To specify an EFA, you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` // The placement information. @@ -106778,6 +108221,10 @@ type SpotFleetRequestConfigData struct { // Indicates whether Spot Fleet should replace unhealthy instances. 
ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` + // The strategies for managing your Spot Instances that are at an elevated risk + // of being interrupted. + SpotMaintenanceStrategies *SpotMaintenanceStrategies `locationName:"spotMaintenanceStrategies" type:"structure"` + // The maximum amount per hour for Spot Instances that you're willing to pay. // You can use the spotdMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, // or both parameters to ensure that your fleet cost does not exceed your budget. @@ -106967,6 +108414,12 @@ func (s *SpotFleetRequestConfigData) SetReplaceUnhealthyInstances(v bool) *SpotF return s } +// SetSpotMaintenanceStrategies sets the SpotMaintenanceStrategies field's value. +func (s *SpotFleetRequestConfigData) SetSpotMaintenanceStrategies(v *SpotMaintenanceStrategies) *SpotFleetRequestConfigData { + s.SpotMaintenanceStrategies = v + return s +} + // SetSpotMaxTotalPrice sets the SpotMaxTotalPrice field's value. func (s *SpotFleetRequestConfigData) SetSpotMaxTotalPrice(v string) *SpotFleetRequestConfigData { s.SpotMaxTotalPrice = &v @@ -107117,11 +108570,16 @@ type SpotInstanceRequest struct { // The request becomes active at this date and time. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` - // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - // If this is a one-time request, it remains active until all instances launch, - // the request is canceled, or this date is reached. If the request is persistent, - // it remains active until it is canceled or this date is reached. The default - // end date is 7 days from the current date. + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // + // * For a persistent request, the request remains active until the validUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, the request remains active until all instances + // launch, the request is canceled, or the validUntil date and time is reached. + // By default, the request is valid for 7 days from the date the request + // was created. ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } @@ -107326,6 +108784,32 @@ func (s *SpotInstanceStatus) SetUpdateTime(v time.Time) *SpotInstanceStatus { return s } +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type SpotMaintenanceStrategies struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *SpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` +} + +// String returns the string representation +func (s SpotMaintenanceStrategies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotMaintenanceStrategies) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *SpotMaintenanceStrategies) SetCapacityRebalance(v *SpotCapacityRebalance) *SpotMaintenanceStrategies { + s.CapacityRebalance = v + return s +} + // The options for Spot Instances. type SpotMarketOptions struct { _ struct{} `type:"structure"` @@ -107333,6 +108817,17 @@ type SpotMarketOptions struct { // The required duration for the Spot Instances (also known as Spot blocks), // in minutes. 
This value must be a multiple of 60 (60, 120, 180, 240, 300, // or 360). + // + // The duration period starts as soon as your Spot Instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot Instance + // for termination and provides a Spot Instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // You can't specify an Availability Zone group or a launch group if you specify + // a duration. + // + // New accounts or accounts with no previous billing history with AWS are not + // eligible for Spot Instances with a defined duration (also known as Spot blocks). BlockDurationMinutes *int64 `type:"integer"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -107347,11 +108842,15 @@ type SpotMarketOptions struct { // is set to either hibernate or stop. SpotInstanceType *string `type:"string" enum:"SpotInstanceType"` - // The end date of the request. For a one-time request, the request remains - // active until all instances launch, the request is canceled, or this date - // is reached. If the request is persistent, it remains active until it is canceled - // or this date and time is reached. The default end date is 7 days from the - // current date. + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported + // only for persistent requests. + // + // * For a persistent request, the request remains active until the ValidUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, ValidUntil is not supported. The request remains + // active until all instances launch or you cancel the request. ValidUntil *time.Time `type:"timestamp"` } @@ -107423,6 +108922,10 @@ type SpotOptions struct { // the number of Spot pools that you specify. InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"` + // The strategies for managing your workloads on your Spot Instances that will + // be interrupted. Currently only the capacity rebalance strategy is available. + MaintenanceStrategies *FleetSpotMaintenanceStrategies `locationName:"maintenanceStrategies" type:"structure"` + // The maximum amount per hour for Spot Instances that you're willing to pay. MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"` @@ -107467,6 +108970,12 @@ func (s *SpotOptions) SetInstancePoolsToUseCount(v int64) *SpotOptions { return s } +// SetMaintenanceStrategies sets the MaintenanceStrategies field's value. +func (s *SpotOptions) SetMaintenanceStrategies(v *FleetSpotMaintenanceStrategies) *SpotOptions { + s.MaintenanceStrategies = v + return s +} + // SetMaxTotalPrice sets the MaxTotalPrice field's value. func (s *SpotOptions) SetMaxTotalPrice(v string) *SpotOptions { s.MaxTotalPrice = &v @@ -107519,6 +109028,10 @@ type SpotOptionsRequest struct { // across the number of Spot pools that you specify. InstancePoolsToUseCount *int64 `type:"integer"` + // The strategies for managing your Spot Instances that are at an elevated risk + // of being interrupted. + MaintenanceStrategies *FleetSpotMaintenanceStrategiesRequest `type:"structure"` + // The maximum amount per hour for Spot Instances that you're willing to pay. MaxTotalPrice *string `type:"string"` @@ -107563,6 +109076,12 @@ func (s *SpotOptionsRequest) SetInstancePoolsToUseCount(v int64) *SpotOptionsReq return s } +// SetMaintenanceStrategies sets the MaintenanceStrategies field's value. 
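// Minimal sketch of wiring the new Spot capacity rebalance strategy into a Spot Fleet
// request using the setters introduced above. FleetTypeMaintain and the RequestSpotFleetInput
// wiring in the trailing comment are assumed from the wider SDK, and the rest of the fleet
// configuration is elided.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Launch a replacement instance when EC2 signals an elevated interruption
	// risk; the strategy is only valid for fleets of type "maintain".
	rebalance := (&ec2.SpotCapacityRebalance{}).
		SetReplacementStrategy(ec2.ReplacementStrategyLaunch)

	config := &ec2.SpotFleetRequestConfigData{
		Type: aws.String(ec2.FleetTypeMaintain),
		// IAM fleet role, target capacity and launch specifications would be
		// filled in here for a real request.
	}
	config.SetSpotMaintenanceStrategies(
		(&ec2.SpotMaintenanceStrategies{}).SetCapacityRebalance(rebalance),
	)

	// A real request would pass the config via
	// ec2.RequestSpotFleetInput{SpotFleetRequestConfig: config}.
	fmt.Println(config)
}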
+func (s *SpotOptionsRequest) SetMaintenanceStrategies(v *FleetSpotMaintenanceStrategiesRequest) *SpotOptionsRequest { + s.MaintenanceStrategies = v + return s +} + // SetMaxTotalPrice sets the MaxTotalPrice field's value. func (s *SpotOptionsRequest) SetMaxTotalPrice(v string) *SpotOptionsRequest { s.MaxTotalPrice = &v @@ -108646,12 +110165,12 @@ type TagSpecification struct { _ struct{} `type:"structure"` // The type of resource to tag. Currently, the resource types that support tagging - // on creation are: capacity-reservation | client-vpn-endpoint | customer-gateway - // | dedicated-host | dhcp-options | export-image-task | export-instance-task - // | fleet | fpga-image | host-reservation | import-image-task | import-snapshot-task - // | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | - // launch-template | placement-group | prefix-list | natgateway | network-acl - // | route-table | security-group | spot-fleet-request | spot-instances-request + // on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint + // | customer-gateway | dedicated-host | dhcp-options | export-image-task | + // export-instance-task | fleet | fpga-image | host-reservation | import-image-task + // | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 + // | key-pair | launch-template | placement-group | prefix-list | natgateway + // | network-acl | route-table | security-group | spot-fleet-request | spot-instances-request // | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target // | transit-gateway | transit-gateway-attachment | transit-gateway-route-table // | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and @@ -111341,6 +112860,9 @@ func (s *TransitGatewayVpcAttachment) SetVpcOwnerId(v string) *TransitGatewayVpc type TransitGatewayVpcAttachmentOptions struct { _ struct{} `type:"structure"` + // Indicates whether appliance mode support is enabled. + ApplianceModeSupport *string `locationName:"applianceModeSupport" type:"string" enum:"ApplianceModeSupportValue"` + // Indicates whether DNS support is enabled. DnsSupport *string `locationName:"dnsSupport" type:"string" enum:"DnsSupportValue"` @@ -111358,6 +112880,12 @@ func (s TransitGatewayVpcAttachmentOptions) GoString() string { return s.String() } +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *TransitGatewayVpcAttachmentOptions) SetApplianceModeSupport(v string) *TransitGatewayVpcAttachmentOptions { + s.ApplianceModeSupport = &v + return s +} + // SetDnsSupport sets the DnsSupport field's value. func (s *TransitGatewayVpcAttachmentOptions) SetDnsSupport(v string) *TransitGatewayVpcAttachmentOptions { s.DnsSupport = &v @@ -112309,12 +113837,11 @@ type VCpuInfo struct { // The default number of vCPUs for the instance type. DefaultVCpus *int64 `locationName:"defaultVCpus" type:"integer"` - // List of the valid number of cores that can be configured for the instance - // type. + // The valid number of cores that can be configured for the instance type. ValidCores []*int64 `locationName:"validCores" locationNameList:"item" type:"list"` - // List of the valid number of threads per core that can be configured for the - // instance type. + // The valid number of threads per core that can be configured for the instance + // type. 
ValidThreadsPerCore []*int64 `locationName:"validThreadsPerCore" locationNameList:"item" type:"list"` } @@ -113627,6 +115154,9 @@ type VpcEndpointConnection struct { // The DNS entries for the VPC endpoint. DnsEntries []*DnsEntry `locationName:"dnsEntrySet" locationNameList:"item" type:"list"` + // The Amazon Resource Names (ARNs) of the Gateway Load Balancers for the service. + GatewayLoadBalancerArns []*string `locationName:"gatewayLoadBalancerArnSet" locationNameList:"item" type:"list"` + // The Amazon Resource Names (ARNs) of the network load balancers for the service. NetworkLoadBalancerArns []*string `locationName:"networkLoadBalancerArnSet" locationNameList:"item" type:"list"` @@ -113665,6 +115195,12 @@ func (s *VpcEndpointConnection) SetDnsEntries(v []*DnsEntry) *VpcEndpointConnect return s } +// SetGatewayLoadBalancerArns sets the GatewayLoadBalancerArns field's value. +func (s *VpcEndpointConnection) SetGatewayLoadBalancerArns(v []*string) *VpcEndpointConnection { + s.GatewayLoadBalancerArns = v + return s +} + // SetNetworkLoadBalancerArns sets the NetworkLoadBalancerArns field's value. func (s *VpcEndpointConnection) SetNetworkLoadBalancerArns(v []*string) *VpcEndpointConnection { s.NetworkLoadBalancerArns = v @@ -114883,6 +116419,22 @@ func AllowsMultipleInstanceTypes_Values() []string { } } +const ( + // ApplianceModeSupportValueEnable is a ApplianceModeSupportValue enum value + ApplianceModeSupportValueEnable = "enable" + + // ApplianceModeSupportValueDisable is a ApplianceModeSupportValue enum value + ApplianceModeSupportValueDisable = "disable" +) + +// ApplianceModeSupportValue_Values returns all elements of the ApplianceModeSupportValue enum +func ApplianceModeSupportValue_Values() []string { + return []string{ + ApplianceModeSupportValueEnable, + ApplianceModeSupportValueDisable, + } +} + const ( // ArchitectureTypeI386 is a ArchitectureType enum value ArchitectureTypeI386 = "i386" @@ -115447,6 +116999,22 @@ func ClientVpnConnectionStatusCode_Values() []string { } } +const ( + // ClientVpnEndpointAttributeStatusCodeApplying is a ClientVpnEndpointAttributeStatusCode enum value + ClientVpnEndpointAttributeStatusCodeApplying = "applying" + + // ClientVpnEndpointAttributeStatusCodeApplied is a ClientVpnEndpointAttributeStatusCode enum value + ClientVpnEndpointAttributeStatusCodeApplied = "applied" +) + +// ClientVpnEndpointAttributeStatusCode_Values returns all elements of the ClientVpnEndpointAttributeStatusCode enum +func ClientVpnEndpointAttributeStatusCode_Values() []string { + return []string{ + ClientVpnEndpointAttributeStatusCodeApplying, + ClientVpnEndpointAttributeStatusCodeApplied, + } +} + const ( // ClientVpnEndpointStatusCodePendingAssociate is a ClientVpnEndpointStatusCode enum value ClientVpnEndpointStatusCodePendingAssociate = "pending-associate" @@ -115915,6 +117483,26 @@ func EndDateType_Values() []string { } } +const ( + // EphemeralNvmeSupportUnsupported is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportUnsupported = "unsupported" + + // EphemeralNvmeSupportSupported is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportSupported = "supported" + + // EphemeralNvmeSupportRequired is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportRequired = "required" +) + +// EphemeralNvmeSupport_Values returns all elements of the EphemeralNvmeSupport enum +func EphemeralNvmeSupport_Values() []string { + return []string{ + EphemeralNvmeSupportUnsupported, + EphemeralNvmeSupportSupported, + EphemeralNvmeSupportRequired, + } +} + const ( // 
EventCodeInstanceReboot is a EventCode enum value EventCodeInstanceReboot = "instance-reboot" @@ -116143,6 +117731,18 @@ func FleetOnDemandAllocationStrategy_Values() []string { } } +const ( + // FleetReplacementStrategyLaunch is a FleetReplacementStrategy enum value + FleetReplacementStrategyLaunch = "launch" +) + +// FleetReplacementStrategy_Values returns all elements of the FleetReplacementStrategy enum +func FleetReplacementStrategy_Values() []string { + return []string{ + FleetReplacementStrategyLaunch, + } +} + const ( // FleetStateCodeSubmitted is a FleetStateCode enum value FleetStateCodeSubmitted = "submitted" @@ -116501,6 +118101,9 @@ const ( // InstanceAttributeNameEnaSupport is a InstanceAttributeName enum value InstanceAttributeNameEnaSupport = "enaSupport" + + // InstanceAttributeNameEnclaveOptions is a InstanceAttributeName enum value + InstanceAttributeNameEnclaveOptions = "enclaveOptions" ) // InstanceAttributeName_Values returns all elements of the InstanceAttributeName enum @@ -116520,6 +118123,7 @@ func InstanceAttributeName_Values() []string { InstanceAttributeNameEbsOptimized, InstanceAttributeNameSriovNetSupport, InstanceAttributeNameEnaSupport, + InstanceAttributeNameEnclaveOptions, } } @@ -117368,6 +118972,9 @@ const ( // InstanceTypeP3dn24xlarge is a InstanceType enum value InstanceTypeP3dn24xlarge = "p3dn.24xlarge" + // InstanceTypeP4d24xlarge is a InstanceType enum value + InstanceTypeP4d24xlarge = "p4d.24xlarge" + // InstanceTypeD2Xlarge is a InstanceType enum value InstanceTypeD2Xlarge = "d2.xlarge" @@ -117955,6 +119562,7 @@ func InstanceType_Values() []string { InstanceTypeP38xlarge, InstanceTypeP316xlarge, InstanceTypeP3dn24xlarge, + InstanceTypeP4d24xlarge, InstanceTypeD2Xlarge, InstanceTypeD22xlarge, InstanceTypeD24xlarge, @@ -118895,6 +120503,18 @@ func RecurringChargeFrequency_Values() []string { } } +const ( + // ReplacementStrategyLaunch is a ReplacementStrategy enum value + ReplacementStrategyLaunch = "launch" +) + +// ReplacementStrategy_Values returns all elements of the ReplacementStrategy enum +func ReplacementStrategy_Values() []string { + return []string{ + ReplacementStrategyLaunch, + } +} + const ( // ReportInstanceReasonCodesInstanceStuckInState is a ReportInstanceReasonCodes enum value ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" @@ -119331,6 +120951,22 @@ func Scope_Values() []string { } } +const ( + // SelfServicePortalEnabled is a SelfServicePortal enum value + SelfServicePortalEnabled = "enabled" + + // SelfServicePortalDisabled is a SelfServicePortal enum value + SelfServicePortalDisabled = "disabled" +) + +// SelfServicePortal_Values returns all elements of the SelfServicePortal enum +func SelfServicePortal_Values() []string { + return []string{ + SelfServicePortalEnabled, + SelfServicePortalDisabled, + } +} + const ( // ServiceStatePending is a ServiceState enum value ServiceStatePending = "Pending" @@ -119365,6 +121001,9 @@ const ( // ServiceTypeGateway is a ServiceType enum value ServiceTypeGateway = "Gateway" + + // ServiceTypeGatewayLoadBalancer is a ServiceType enum value + ServiceTypeGatewayLoadBalancer = "GatewayLoadBalancer" ) // ServiceType_Values returns all elements of the ServiceType enum @@ -119372,6 +121011,7 @@ func ServiceType_Values() []string { return []string{ ServiceTypeInterface, ServiceTypeGateway, + ServiceTypeGatewayLoadBalancer, } } @@ -120481,6 +122121,9 @@ const ( // VpcEndpointTypeGateway is a VpcEndpointType enum value VpcEndpointTypeGateway = "Gateway" + + // 
VpcEndpointTypeGatewayLoadBalancer is a VpcEndpointType enum value + VpcEndpointTypeGatewayLoadBalancer = "GatewayLoadBalancer" ) // VpcEndpointType_Values returns all elements of the VpcEndpointType enum @@ -120488,6 +122131,7 @@ func VpcEndpointType_Values() []string { return []string{ VpcEndpointTypeInterface, VpcEndpointTypeGateway, + VpcEndpointTypeGatewayLoadBalancer, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 62e4783a59009..2ab5d1dad25f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1212,6 +1212,106 @@ func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBuc return out, req.Send() } +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. 
Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -2798,6 +2898,105 @@ func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEnc return out, req.Send() } +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -4314,9 +4513,10 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // For more information about returning the ACL of an object, see GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). // -// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE -// storage classes, before you can retrieve the object you must first restore -// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// If the object you are retrieving is stored in the S3 Glacier, S3 Glacier +// Deep Archive, S3 Intelligent-Tiering Archive, or S3 Intelligent-Tiering Deep +// Archive storage classes, before you can retrieve the object you must first +// restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). // Otherwise, this operation returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // @@ -4429,6 +4629,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // +// * ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) @@ -5379,6 +5582,105 @@ func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input return out, req.Send() } +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. +// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the @@ -7066,6 +7368,106 @@ func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEnc return out, req.Send() } +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -7950,14 +8352,14 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls // PutBucketOwnershipControls API operation for Amazon Simple Storage Service. // // Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:GetBucketOwnershipControls permission. For +// operation, you must have the s3:PutBucketOwnershipControls permission. For // more information about Amazon S3 permissions, see Specifying Permissions // in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // // For information about Amazon S3 Object Ownership, see Using Object Ownership // (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // -// The following operations are related to GetBucketOwnershipControls: +// The following operations are related to PutBucketOwnershipControls: // // * GetBucketOwnershipControls // @@ -9599,58 +10001,56 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restoring Archives // -// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To -// access an archived object, you must first initiate a restore request. This -// restores a temporary copy of the archived object. In a restore request, you -// specify the number of days that you want the restored copy to exist. After -// the specified period, Amazon S3 deletes the temporary copy but the object -// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object -// was restored from. +// Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering +// Archive, or S3 Intelligent-Tiering Deep Archive storage classes are not accessible +// in real time. For objects in Archive Access tier or Deep Archive Access tier +// you must first initiate a restore request, and then wait until the object +// is moved into the Frequent Access tier. 
For objects in S3 Glacier or S3 Glacier +// Deep Archive you must first initiate a restore request, and then wait until +// a temporary copy of the object is available. To access an archived object, +// you must restore the object for the duration (number of days) that you specify. // // To restore a specific object version, you can provide a version ID. If you // don't provide a version ID, Amazon S3 restores the current version. // -// The time it takes restore jobs to finish depends on which storage class the -// object is being restored from and which data access tier you specify. -// // When restoring an archived object (or using a select request), you can specify // one of the following data access tier options in the Tier element of the // request body: // // * Expedited - Expedited retrievals allow you to quickly access your data -// stored in the GLACIER storage class when occasional urgent requests for -// a subset of archives are required. For all but the largest archived objects -// (250 MB+), data accessed using Expedited retrievals are typically made -// available within 1–5 minutes. Provisioned capacity ensures that retrieval -// capacity for Expedited retrievals is available when you need it. Expedited -// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE -// storage class. -// -// * Standard - S3 Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for the GLACIER -// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval -// option. S3 Standard retrievals typically complete within 3-5 hours from -// the GLACIER storage class and typically complete within 12 hours from -// the DEEP_ARCHIVE storage class. -// -// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval -// option, enabling you to retrieve large amounts, even petabytes, of data -// inexpensively in a day. Bulk retrievals typically complete within 5-12 -// hours from the GLACIER storage class and typically complete within 48 -// hours from the DEEP_ARCHIVE storage class. +// stored in the S3 Glacier or S3 Intelligent-Tiering Archive storage class +// when occasional urgent requests for a subset of archives are required. +// For all but the largest archived objects (250 MB+), data accessed using +// Expedited retrievals is typically made available within 1–5 minutes. +// Provisioned capacity ensures that retrieval capacity for Expedited retrievals +// is available when you need it. Expedited retrievals and provisioned capacity +// are not available for objects stored in the S3 Glacier Deep Archive or +// S3 Intelligent-Tiering Deep Archive storage class. +// +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval +// requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier +// or S3 Intelligent-Tiering Archive storage class. They typically finish +// within 12 hours for objects stored in the S3 Glacier Deep Archive or S3 +// Intelligent-Tiering Deep Archive storage class. Standard retrievals are +// free for objects stored in S3 Intelligent-Tiering. +// +// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, +// enabling you to retrieve large amounts, even petabytes, of data inexpensively. 
+// Bulk retrievals typically finish within 5–12 hours for objects stored +// in the S3 Glacier or S3 Intelligent-Tiering Archive storage class. They +// typically finish within 48 hours for objects stored in the S3 Glacier +// Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. Bulk +// retrievals are free for objects stored in S3 Intelligent-Tiering. // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) // in the Amazon Simple Storage Service Developer Guide. // // You can use Amazon S3 restore speed upgrade to change the restore speed to -// a faster speed while it is in progress. You upgrade the speed of an in-progress -// restoration by issuing another restore request to the same object, setting -// a new Tier request element. When issuing a request to upgrade the restore -// tier, you must choose a tier that is faster than the tier that the in-progress -// restore is using. You must not change any other parameters, such as the Days -// request element. For more information, see Upgrading the Speed of an In-Progress -// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// a faster speed while it is in progress. For more information, see Upgrading +// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) // in the Amazon Simple Storage Service Developer Guide. // // To get the status of object restoration, you can send a HEAD request. Operations @@ -9679,11 +10079,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // A successful operation returns either the 200 OK or 202 Accepted status code. // -// * If the object copy is not previously restored, then Amazon S3 returns -// 202 Accepted in the response. +// * If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. // -// * If the object copy is previously restored, Amazon S3 returns 200 OK -// in the response. +// * If the object is previously restored, Amazon S3 returns 200 OK in the +// response. // // Special Errors // @@ -9691,11 +10091,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // (This error does not apply to SELECT type requests.) HTTP Status Code: // 409 Conflict SOAP Fault Code Prefix: Client // -// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited -// retrievals are currently not available. Try again later. (Returned if -// there is insufficient capacity to process the Expedited request. This -// error applies only to Expedited retrievals and not to S3 Standard or Bulk -// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals +// are currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to +// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A // // Related Resources // @@ -10564,12 +10964,14 @@ func (s *AbortMultipartUploadInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. 
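[editor's note, not part of the patch] A sketch of initiating a Standard-tier restore as described in the RestoreObject documentation above. The GlacierJobParameters.Tier field appears later in this hunk; the RestoreObjectInput and RestoreRequest field names and the Tier* constant are assumptions taken from the wider SDK.

    // Hypothetical usage sketch, not SDK or patch code.
    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        client := s3.New(session.Must(session.NewSession()))

        _, err := client.RestoreObject(&s3.RestoreObjectInput{
            Bucket: aws.String("example-bucket"),
            Key:    aws.String("archive/report.csv"),
            RestoreRequest: &s3.RestoreRequest{
                // Number of days the temporary copy should remain available.
                Days: aws.Int64(7),
                // Standard is the default retrieval option; Expedited and Bulk
                // are the other tiers documented above.
                GlacierJobParameters: &s3.GlacierJobParameters{
                    Tier: aws.String(s3.TierStandard),
                },
            },
        })
        if err != nil {
            // RestoreAlreadyInProgress (409) or GlacierExpeditedRetrievalNotAvailable (503)
            // surface here as awserr.Error values, per the Special Errors list above.
            log.Fatal(err)
        }
    }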
This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *AbortMultipartUploadInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type AbortMultipartUploadOutput struct { @@ -11701,12 +12103,14 @@ func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *CompleteMultipartUploadInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type CompleteMultipartUploadOutput struct { @@ -12505,12 +12909,14 @@ func (s *CopyObjectInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *CopyObjectInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type CopyObjectOutput struct { @@ -13244,12 +13650,14 @@ func (s *CreateMultipartUploadInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *CreateMultipartUploadInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type CreateMultipartUploadOutput struct { @@ -13600,12 +14008,14 @@ func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
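[editor's note, not part of the patch] The hunks above and below change updateArnableField from a pointer receiver that mutated the input to a value receiver that returns a modified copy. A tiny standalone sketch (a hypothetical type, not SDK code) of the copy-on-write semantics that the new doc comment ("original input is not modified") describes:

    // Hypothetical sketch of the value-receiver pattern; not SDK code.
    package main

    import (
        "errors"
        "fmt"
    )

    type exampleInput struct {
        Bucket *string
    }

    func (s exampleInput) updateArnableField(v string) (interface{}, error) {
        if s.Bucket == nil {
            return nil, errors.New("member Bucket is nil")
        }
        s.Bucket = &v // mutates only the copy held by the value receiver
        return &s, nil
    }

    func main() {
        orig := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"
        in := exampleInput{Bucket: &orig}

        updated, err := in.updateArnableField("my-ap")
        if err != nil {
            panic(err)
        }
        fmt.Println(*in.Bucket)                      // still the original ARN
        fmt.Println(*updated.(*exampleInput).Bucket) // "my-ap"
    }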
+func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketAnalyticsConfigurationOutput struct { @@ -13698,12 +14108,14 @@ func (s *DeleteBucketCorsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketCorsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketCorsOutput struct { @@ -13797,12 +14209,14 @@ func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketEncryptionInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketEncryptionOutput struct { @@ -13895,14 +14309,120 @@ func (s *DeleteBucketInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } return nil } +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + type DeleteBucketInventoryConfigurationInput struct { _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` @@ -13993,12 +14513,14 @@ func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketInventoryConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketInventoryConfigurationOutput struct { @@ -14091,12 +14613,14 @@ func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. 
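[editor's note, not part of the patch] A sketch using the DeleteBucketIntelligentTieringConfigurationInput setters and Validate method added above. The client-side DeleteBucketIntelligentTieringConfiguration method is assumed to be defined elsewhere in this file; only the input and output types appear in this hunk.

    // Hypothetical usage sketch, not SDK or patch code.
    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        client := s3.New(session.Must(session.NewSession()))

        input := (&s3.DeleteBucketIntelligentTieringConfigurationInput{}).
            SetBucket("example-bucket").
            SetId("archive-after-90-days")

        // Validate catches the missing-Bucket / missing-Id cases locally,
        // mirroring the checks in the Validate method shown above.
        if err := input.Validate(); err != nil {
            log.Fatal(err)
        }

        // Assumed client method; not part of this hunk.
        if _, err := client.DeleteBucketIntelligentTieringConfiguration(input); err != nil {
            log.Fatal(err)
        }
    }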
This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketLifecycleInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketLifecycleOutput struct { @@ -14203,12 +14727,14 @@ func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketMetricsConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketMetricsConfigurationOutput struct { @@ -14247,6 +14773,9 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -14312,12 +14841,14 @@ func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketOwnershipControlsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketOwnershipControlsOutput struct { @@ -14410,12 +14941,14 @@ func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketPolicyInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketPolicyOutput struct { @@ -14508,12 +15041,14 @@ func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketReplicationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketReplicationOutput struct { @@ -14606,12 +15141,14 @@ func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketTaggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketTaggingOutput struct { @@ -14704,12 +15241,14 @@ func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteBucketWebsiteInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteBucketWebsiteOutput struct { @@ -14787,24 +15326,25 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Specifies whether Amazon S3 replicates the delete markers. If you specify -// a Filter, you must specify this element. However, in the latest version of -// replication configuration (when Filter is specified), Amazon S3 doesn't replicate -// delete markers. Therefore, the DeleteMarkerReplication element can contain -// only Disabled. For an example configuration, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter +// in your replication configuration, you must also include a DeleteMarkerReplication +// element. 
If your Filter includes a Tag element, the DeleteMarkerReplication +// Status must be set to Disabled, because Amazon S3 does not support replicating +// delete markers for tag-based rules. For an example configuration, see Basic +// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). // -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, Amazon -// S3 handled replication of delete markers differently. For more information, +// For more information about delete marker replication, see Basic Rule Configuration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` // Indicates whether to replicate delete markers. // - // In the current implementation, Amazon S3 doesn't replicate the delete markers. - // The status must be Disabled. + // Indicates whether to replicate delete markers. Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` } @@ -14976,12 +15516,14 @@ func (s *DeleteObjectInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteObjectInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteObjectOutput struct { @@ -15145,12 +15687,14 @@ func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteObjectTaggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteObjectTaggingOutput struct { @@ -15322,12 +15866,14 @@ func (s *DeleteObjectsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeleteObjectsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
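[editor's note, not part of the patch] A sketch of a replication rule with a tag-based filter, where the doc text above requires DeleteMarkerReplication to be Disabled. The ReplicationRule, ReplicationRuleFilter, and Destination field names and the status constants are assumptions from the wider SDK; only the DeleteMarkerReplication doc change appears in this hunk.

    // Hypothetical usage sketch, not SDK or patch code.
    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        rule := &s3.ReplicationRule{
            Status:   aws.String(s3.ReplicationRuleStatusEnabled),
            Priority: aws.Int64(1),
            Filter: &s3.ReplicationRuleFilter{
                Tag: &s3.Tag{Key: aws.String("class"), Value: aws.String("archive")},
            },
            // Per the doc text above, tag-based rules cannot replicate delete
            // markers, so the status must be Disabled.
            DeleteMarkerReplication: &s3.DeleteMarkerReplication{
                Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
            },
            Destination: &s3.Destination{
                Bucket: aws.String("arn:aws:s3:::example-replica-bucket"),
            },
        }
        fmt.Println(rule)
    }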
+func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeleteObjectsOutput struct { @@ -15450,12 +15996,14 @@ func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *DeletePublicAccessBlockInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type DeletePublicAccessBlockOutput struct { @@ -15557,9 +16105,8 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A container specifying replication metrics-related settings enabling metrics - // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified - // together with a ReplicationTime block. + // A container specifying replication metrics-related settings enabling replication + // metrics and events. Metrics *Metrics `type:"structure"` // A container specifying S3 Replication Time Control (S3 RTC), including whether @@ -16394,12 +16941,14 @@ func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketAccelerateConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketAccelerateConfigurationOutput struct { @@ -16501,12 +17050,14 @@ func (s *GetBucketAclInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketAclInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketAclOutput struct { @@ -16631,12 +17182,14 @@ func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. 
-func (s *GetBucketAnalyticsConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketAnalyticsConfigurationOutput struct { @@ -16738,12 +17291,14 @@ func (s *GetBucketCorsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketCorsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketCorsOutput struct { @@ -16847,12 +17402,14 @@ func (s *GetBucketEncryptionInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketEncryptionInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketEncryptionOutput struct { @@ -16878,6 +17435,119 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv return s } +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + type GetBucketInventoryConfigurationInput struct { _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` @@ -16968,12 +17638,14 @@ func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketInventoryConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
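[editor's note, not part of the patch] A sketch that fetches a configuration and reads the output type added above. The input's Bucket and Id fields and the output's IntelligentTieringConfiguration field are taken from this hunk; the client-side GetBucketIntelligentTieringConfiguration method is assumed to be defined elsewhere in this file.

    // Hypothetical usage sketch, not SDK or patch code.
    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        client := s3.New(session.Must(session.NewSession()))

        // Assumed client method; the input/output types are from this hunk.
        out, err := client.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
            Bucket: aws.String("example-bucket"),
            Id:     aws.String("archive-after-90-days"),
        })
        if err != nil {
            log.Fatal(err)
        }

        cfg := out.IntelligentTieringConfiguration
        fmt.Printf("id=%s status=%s tierings=%d\n",
            aws.StringValue(cfg.Id), aws.StringValue(cfg.Status), len(cfg.Tierings))
    }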
+func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketInventoryConfigurationOutput struct { @@ -17075,12 +17747,14 @@ func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketLifecycleConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketLifecycleConfigurationOutput struct { @@ -17182,12 +17856,14 @@ func (s *GetBucketLifecycleInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketLifecycleInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketLifecycleOutput struct { @@ -17289,12 +17965,14 @@ func (s *GetBucketLocationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketLocationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketLocationOutput struct { @@ -17398,12 +18076,14 @@ func (s *GetBucketLoggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketLoggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketLoggingOutput struct { @@ -17522,12 +18202,14 @@ func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketMetricsConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketMetricsConfigurationOutput struct { @@ -17629,12 +18311,14 @@ func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketNotificationConfigurationRequest) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketOwnershipControlsInput struct { @@ -17645,6 +18329,9 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -17710,12 +18397,14 @@ func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketOwnershipControlsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketOwnershipControlsOutput struct { @@ -17818,12 +18507,14 @@ func (s *GetBucketPolicyInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. 
-func (s *GetBucketPolicyInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketPolicyOutput struct { @@ -17925,12 +18616,14 @@ func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketPolicyStatusInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketPolicyStatusOutput struct { @@ -18032,12 +18725,14 @@ func (s *GetBucketReplicationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketReplicationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketReplicationOutput struct { @@ -18140,12 +18835,14 @@ func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketRequestPaymentInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketRequestPaymentOutput struct { @@ -18247,12 +18944,14 @@ func (s *GetBucketTaggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketTaggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketTaggingOutput struct { @@ -18356,12 +19055,14 @@ func (s *GetBucketVersioningInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketVersioningInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketVersioningOutput struct { @@ -18474,12 +19175,14 @@ func (s *GetBucketWebsiteInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetBucketWebsiteInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetBucketWebsiteOutput struct { @@ -18655,12 +19358,14 @@ func (s *GetObjectAclInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetObjectAclInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectAclOutput struct { @@ -18994,12 +19699,14 @@ func (s *GetObjectInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetObjectInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectLegalHoldInput struct { @@ -19125,12 +19832,14 @@ func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. 
-func (s *GetObjectLegalHoldInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectLegalHoldOutput struct { @@ -19239,12 +19948,14 @@ func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetObjectLockConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectLockConfigurationOutput struct { @@ -19714,12 +20425,14 @@ func (s *GetObjectRetentionInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetObjectRetentionInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectRetentionOutput struct { @@ -19862,12 +20575,14 @@ func (s *GetObjectTaggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetObjectTaggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectTaggingOutput struct { @@ -20011,12 +20726,14 @@ func (s *GetObjectTorrentInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetObjectTorrentInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetObjectTorrentOutput struct { @@ -20129,12 +20846,14 @@ func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *GetPublicAccessBlockInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type GetPublicAccessBlockOutput struct { @@ -20165,7 +20884,7 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public type GlacierJobParameters struct { _ struct{} `type:"structure"` - // S3 Glacier retrieval tier at which the restore will be processed. + // Retrieval tier at which the restore will be processed. // // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -20437,12 +21156,14 @@ func (s *HeadBucketInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *HeadBucketInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type HeadBucketOutput struct { @@ -20695,12 +21416,14 @@ func (s *HeadObjectInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *HeadObjectInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type HeadObjectOutput struct { @@ -20709,6 +21432,9 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + // The archive state of the head object. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -20880,6 +21606,12 @@ func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { return s } +// SetArchiveStatus sets the ArchiveStatus field's value. 
+func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { s.CacheControl = &v @@ -21171,6 +21903,224 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
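// Editor's note, not part of the vendored diff: a minimal sketch, assuming a
// standard aws-sdk-go session, of reading the ArchiveStatus header that this
// update adds to HeadObjectOutput. Bucket and key names are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		panic(err)
	}

	// ArchiveStatus is typically nil unless S3 Intelligent-Tiering has moved
	// the object into one of the archive tiers (see the ArchiveStatus enum
	// values added further down in this diff).
	if out.ArchiveStatus != nil {
		fmt.Println("archive status:", *out.ArchiveStatus)
	}
}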
+func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. +type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // A container of a key value name pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
+func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + // Specifies the inventory configuration for an Amazon S3 bucket. For more information, // see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) // in the Amazon Simple Storage Service API Reference. @@ -22166,12 +23116,14 @@ func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListBucketAnalyticsConfigurationsOutput struct { @@ -22180,51 +23132,192 @@ type ListBucketAnalyticsConfigurationsOutput struct { // The list of analytics configurations for a bucket. AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // The marker that is used as a starting point for this analytics configuration - // list response. This value is present if it was sent in the request. + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. 
+func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. ContinuationToken *string `type:"string"` + // The list of S3 Intelligent-Tiering configurations for a bucket. 
+ IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + // Indicates whether the returned list of analytics configurations is complete. // A value of true indicates that the list is not complete and the NextContinuationToken // will be provided for a subsequent request. IsTruncated *bool `type:"boolean"` - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken. The token is obfuscated and is not a usable value. + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. NextContinuationToken *string `type:"string"` } // String returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) String() string { +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { return s.String() } -// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { - s.AnalyticsConfigurationList = v +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v return s } -// SetContinuationToken sets the ContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { - s.ContinuationToken = &v +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v return s } // SetIsTruncated sets the IsTruncated field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { s.IsTruncated = &v return s } // SetNextContinuationToken sets the NextContinuationToken field's value. -func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { s.NextContinuationToken = &v return s } @@ -22317,12 +23410,14 @@ func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. 
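// Editor's note, not part of the vendored diff: a minimal sketch of paging
// through the new ListBucketIntelligentTieringConfigurations API using the
// input and output types added above. It assumes the matching client
// operation added elsewhere in this vendored SDK update and a placeholder
// bucket name.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.ListBucketIntelligentTieringConfigurationsInput{
		Bucket: aws.String("example-bucket"),
	}
	for {
		out, err := svc.ListBucketIntelligentTieringConfigurations(input)
		if err != nil {
			panic(err)
		}
		for _, cfg := range out.IntelligentTieringConfigurationList {
			fmt.Println("configuration:", aws.StringValue(cfg.Id))
		}
		// NextContinuationToken is only returned while IsTruncated is true.
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		input.ContinuationToken = out.NextContinuationToken
	}
}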
-func (s *ListBucketInventoryConfigurationsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListBucketInventoryConfigurationsOutput struct { @@ -22468,12 +23563,14 @@ func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListBucketMetricsConfigurationsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListBucketMetricsConfigurationsOutput struct { @@ -22752,12 +23849,14 @@ func (s *ListMultipartUploadsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListMultipartUploadsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListMultipartUploadsOutput struct { @@ -23054,12 +24153,14 @@ func (s *ListObjectVersionsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListObjectVersionsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListObjectVersionsOutput struct { @@ -23368,12 +24469,14 @@ func (s *ListObjectsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListObjectsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListObjectsOutput struct { @@ -23678,12 +24781,14 @@ func (s *ListObjectsV2Input) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListObjectsV2Input) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListObjectsV2Output struct { @@ -24010,12 +25115,14 @@ func (s *ListPartsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *ListPartsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListPartsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type ListPartsOutput struct { @@ -24432,17 +25539,14 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } -// A container specifying replication metrics-related settings enabling metrics -// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified -// together with a ReplicationTime block. +// A container specifying replication metrics-related settings enabling replication +// metrics and events. type Metrics struct { _ struct{} `type:"structure"` // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold // event. - // - // EventThreshold is a required field - EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + EventThreshold *ReplicationTimeValue `type:"structure"` // Specifies whether the replication metrics are enabled. // @@ -24463,9 +25567,6 @@ func (s Metrics) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Metrics) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Metrics"} - if s.EventThreshold == nil { - invalidParams.Add(request.NewErrParamRequired("EventThreshold")) - } if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -25736,8 +26837,8 @@ type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only - // AWS services and authorized users within this account if the bucket has a - // public policy. + // AWS service principals and authorized users within this account if the bucket + // has a public policy. 
// // Enabling this setting doesn't affect previously stored bucket policies, except // that public and cross-account access within any public bucket policy, including @@ -25869,12 +26970,14 @@ func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketAccelerateConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketAccelerateConfigurationOutput struct { @@ -26036,12 +27139,14 @@ func (s *PutBucketAclInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketAclInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketAclOutput struct { @@ -26167,12 +27272,14 @@ func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketAnalyticsConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketAnalyticsConfigurationOutput struct { @@ -26287,12 +27394,14 @@ func (s *PutBucketCorsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketCorsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketCorsOutput struct { @@ -26408,12 +27517,14 @@ func (s *PutBucketEncryptionInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. 
-func (s *PutBucketEncryptionInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketEncryptionOutput struct { @@ -26430,6 +27541,129 @@ func (s PutBucketEncryptionOutput) GoString() string { return s.String() } +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. 
+func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + type PutBucketInventoryConfigurationInput struct { _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` @@ -26539,12 +27773,14 @@ func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketInventoryConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketInventoryConfigurationOutput struct { @@ -26651,12 +27887,14 @@ func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketLifecycleConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketLifecycleConfigurationOutput struct { @@ -26761,12 +27999,14 @@ func (s *PutBucketLifecycleInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. 
This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketLifecycleInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketLifecycleOutput struct { @@ -26878,12 +28118,14 @@ func (s *PutBucketLoggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketLoggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketLoggingOutput struct { @@ -27009,12 +28251,14 @@ func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketMetricsConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketMetricsConfigurationOutput struct { @@ -27127,12 +28371,14 @@ func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketNotificationConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketNotificationConfigurationOutput struct { @@ -27239,12 +28485,14 @@ func (s *PutBucketNotificationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketNotificationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketNotificationOutput struct { @@ -27269,6 +28517,9 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want @@ -27354,12 +28605,14 @@ func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketOwnershipControlsInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketOwnershipControlsOutput struct { @@ -27476,12 +28729,14 @@ func (s *PutBucketPolicyInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketPolicyInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketPolicyOutput struct { @@ -27517,6 +28772,7 @@ type PutBucketReplicationInput struct { // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // A token to allow Object Lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -27602,12 +28858,14 @@ func (s *PutBucketReplicationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketReplicationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketReplicationOutput struct { @@ -27719,12 +28977,14 @@ func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketRequestPaymentInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketRequestPaymentOutput struct { @@ -27836,12 +29096,14 @@ func (s *PutBucketTaggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketTaggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketTaggingOutput struct { @@ -27958,12 +29220,14 @@ func (s *PutBucketVersioningInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketVersioningInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketVersioningOutput struct { @@ -28075,12 +29339,14 @@ func (s *PutBucketWebsiteInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutBucketWebsiteInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutBucketWebsiteOutput struct { @@ -28313,12 +29579,14 @@ func (s *PutObjectAclInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. 
This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutObjectAclInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutObjectAclOutput struct { @@ -28785,12 +30053,14 @@ func (s *PutObjectInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutObjectInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutObjectLegalHoldInput struct { @@ -28926,12 +30196,14 @@ func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutObjectLegalHoldInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutObjectLegalHoldOutput struct { @@ -29065,12 +30337,14 @@ func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutObjectLockConfigurationInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutObjectLockConfigurationOutput struct { @@ -29352,12 +30626,14 @@ func (s *PutObjectRetentionInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutObjectRetentionInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutObjectRetentionOutput struct { @@ -29520,12 +30796,14 @@ func (s *PutObjectTaggingInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutObjectTaggingInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutObjectTaggingOutput struct { @@ -29646,12 +30924,14 @@ func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *PutPublicAccessBlockInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type PutPublicAccessBlockOutput struct { @@ -30043,16 +31323,18 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo type ReplicationRule struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 replicates the delete markers. If you specify - // a Filter, you must specify this element. However, in the latest version of - // replication configuration (when Filter is specified), Amazon S3 doesn't replicate - // delete markers. Therefore, the DeleteMarkerReplication element can contain - // only Disabled. For an example configuration, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, Amazon - // S3 handled replication of delete markers differently. 
For more information, + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` @@ -30498,7 +31780,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { type RestoreObjectInput struct { _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` - // The bucket name or containing the object to restore. + // The bucket name containing the object to restore. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. @@ -30639,12 +31921,14 @@ func (s *RestoreObjectInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *RestoreObjectInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type RestoreObjectOutput struct { @@ -30687,6 +31971,9 @@ type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. Days *int64 `type:"integer"` // The optional description for the job. @@ -30702,7 +31989,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // S3 Glacier retrieval tier at which the restore will be processed. + // Retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -31463,12 +32750,14 @@ func (s *SelectObjectContentInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *SelectObjectContentInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type SelectObjectContentOutput struct { @@ -32176,6 +33465,65 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. 
See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of days that you want your archived data to be accessible. The + // minimum number of days specified in the restore request must be at least + // 90 days. If a smaller value is specifed it will be ignored. + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + // A container for specifying the configuration for publication of messages // to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. @@ -32694,12 +34042,14 @@ func (s *UploadPartCopyInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *UploadPartCopyInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type UploadPartCopyOutput struct { @@ -33010,12 +34360,14 @@ func (s *UploadPartInput) hasEndpointARN() bool { // updateArnableField updates the value of the input field that // takes an ARN as an input. This method is useful to backfill // the parsed resource name from ARN into the input member. -func (s *UploadPartInput) updateArnableField(v string) error { +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return fmt.Errorf("member Bucket is nil") + return nil, fmt.Errorf("member Bucket is nil") } s.Bucket = aws.String(v) - return nil + return &s, nil } type UploadPartOutput struct { @@ -33230,6 +34582,22 @@ func AnalyticsS3ExportFileFormat_Values() []string { } } +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + const ( // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value BucketAccelerateStatusEnabled = "Enabled" @@ -33625,6 +34993,38 @@ func FilterRuleName_Values() []string { } } +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const ( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index dd73d460cf3d6..f64b55135eee4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -21,6 +21,12 @@ const ( // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + // ErrCodeInvalidObjectState for service response error code + // "InvalidObjectState". + // + // Object is archived and inaccessible until restored. + ErrCodeInvalidObjectState = "InvalidObjectState" + // ErrCodeNoSuchBucket for service response error code // "NoSuchBucket". 
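The hunks above repeat one mechanical change: updateArnableField now uses a value receiver and returns (interface{}, error), handing back a pointer to a modified copy instead of mutating the caller's input when the bucket name parsed from an access-point ARN is backfilled. A minimal standalone sketch of that copy-on-write pattern; bucketInput and its field are illustrative stand-ins, not SDK types.

package main

import (
	"errors"
	"fmt"
)

// bucketInput is an illustrative stand-in for the generated SDK input types.
type bucketInput struct {
	Bucket *string
}

// updateArnableField mirrors the new generated pattern: the value receiver
// already gives us a shallow copy, so assigning to s.Bucket and returning &s
// leaves the caller's input untouched.
func (s bucketInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, errors.New("member Bucket is nil")
	}
	s.Bucket = &v
	return &s, nil
}

func main() {
	name := "original-bucket"
	in := bucketInput{Bucket: &name}

	out, err := in.updateArnableField("bucket-parsed-from-arn")
	if err != nil {
		panic(err)
	}

	// The original input still holds "original-bucket"; only the returned copy changed.
	fmt.Println(*in.Bucket, *out.(*bucketInput).Bucket)
}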
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index bca091d758ea8..7c62218784357 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -96,6 +96,10 @@ type S3API interface { DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) @@ -164,6 +168,10 @@ type S3API interface { GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) @@ -272,6 +280,10 @@ type S3API interface { ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, 
...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) @@ -339,6 +351,10 @@ type S3API interface { PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go index 98b507b0348e5..ceed4e7ba1991 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go @@ -2,6 +2,8 @@ package alertmanager import ( "context" + "crypto/md5" + "encoding/binary" "fmt" "net/http" "net/url" @@ -32,6 +34,7 @@ import ( "github.com/prometheus/alertmanager/types" "github.com/prometheus/alertmanager/ui" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/common/route" ) @@ -70,6 +73,11 @@ type Alertmanager struct { // The Dispatcher is the only component we need to recreate when we call ApplyConfig. // Given its metrics don't have any variable labels we need to re-use the same metrics. dispatcherMetrics *dispatch.DispatcherMetrics + // This needs to be set to the hash of the config. All the hashes need to be same + // for deduping of alerts to work, hence we need this metric. See https://github.com/prometheus/alertmanager/issues/596 + // Further, in upstream AM, this metric is handled using the config coordinator which we don't use + // hence we need to generate the metric ourselves. 
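The comment above explains why each per-tenant Alertmanager exports its own config hash: alert deduplication only works when every replica runs an identical configuration, and without upstream's config coordinator the metric has to be produced by Cortex itself. A minimal standalone sketch of that approach, matching the md5-based helper added further down in these hunks; the identifiers here are illustrative.

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// configHashAsMetricValue folds the first 48 bits of an MD5 sum into a
// float64. Only 48 bits are kept because a float64 mantissa has 53 bits, so
// the value survives the round-trip through Prometheus exactly, and every
// replica loading the same raw config reports the same number.
func configHashAsMetricValue(rawCfg []byte) float64 {
	sum := md5.Sum(rawCfg)
	var b [8]byte
	copy(b[:], sum[:6])
	return float64(binary.LittleEndian.Uint64(b[:]))
}

func main() {
	reg := prometheus.NewRegistry()
	configHash := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
		Name: "alertmanager_config_hash",
		Help: "Hash of the currently loaded alertmanager configuration.",
	})

	rawCfg := []byte("route:\n  receiver: default\n")
	configHash.Set(configHashAsMetricValue(rawCfg))
	fmt.Printf("%.0f\n", configHashAsMetricValue(rawCfg))
}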
+ configHashMetric prometheus.Gauge activeMtx sync.Mutex active bool @@ -97,6 +105,10 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { stop: make(chan struct{}), active: false, activeMtx: sync.Mutex{}, + configHashMetric: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "alertmanager_config_hash", + Help: "Hash of the currently loaded alertmanager configuration.", + }), } am.registry = reg @@ -182,7 +194,7 @@ func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration { } // ApplyConfig applies a new configuration to an Alertmanager. -func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config) error { +func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config, rawCfg string) error { templateFiles := make([]string, len(conf.Templates)) if len(conf.Templates) > 0 { for i, t := range conf.Templates { @@ -249,6 +261,7 @@ func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config) error { am.active = true am.activeMtx.Unlock() + am.configHashMetric.Set(md5HashAsMetricValue([]byte(rawCfg))) return nil } @@ -367,3 +380,12 @@ func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, log } return integrations, nil } + +func md5HashAsMetricValue(data []byte) float64 { + sum := md5.Sum(data) + // We only want 48 bits as a float64 only has a 53 bit mantissa. + smallSum := sum[0:6] + var bytes = make([]byte, 8) + copy(bytes, smallSum) + return float64(binary.LittleEndian.Uint64(bytes)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go index 354370d016a8e..d500bcafde5be 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go @@ -1,8 +1,6 @@ package alertmanager import ( - "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/util" @@ -11,18 +9,18 @@ import ( // This struct aggregates metrics exported by Alertmanager // and re-exports those aggregates as Cortex metrics. type alertmanagerMetrics struct { - // Maps userID -> registry - regsMu sync.Mutex - regs map[string]*prometheus.Registry + regs *util.UserRegistries // exported metrics, gathered from Alertmanager API alertsReceived *prometheus.Desc alertsInvalid *prometheus.Desc // exported metrics, gathered from Alertmanager PipelineBuilder - numNotifications *prometheus.Desc - numFailedNotifications *prometheus.Desc - notificationLatencySeconds *prometheus.Desc + numNotifications *prometheus.Desc + numFailedNotifications *prometheus.Desc + numNotificationRequestsTotal *prometheus.Desc + numNotificationRequestsFailedTotal *prometheus.Desc + notificationLatencySeconds *prometheus.Desc // exported metrics, gathered from Alertmanager nflog nflogGCDuration *prometheus.Desc @@ -45,12 +43,14 @@ type alertmanagerMetrics struct { silencesQueryDuration *prometheus.Desc silences *prometheus.Desc silencesPropagatedMessagesTotal *prometheus.Desc + + // The alertmanager config hash. 
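The surrounding hunks replace the hand-rolled map of per-user registries with util.UserRegistries, whose helpers aggregate each tenant's Alertmanager metrics into single Cortex-level series with a "user" label. A small sketch of that pattern, relying only on the helper calls visible here (NewUserRegistries, AddUserRegistry, BuildMetricFamiliesPerUser, SendSumOfCountersPerUser); treat the exact signatures as assumptions inferred from this usage.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/cortexproject/cortex/pkg/util"
)

// perUserMetrics aggregates counters from per-tenant registries and
// re-exports them with a "user" label, mirroring alertmanagerMetrics.
type perUserMetrics struct {
	regs *util.UserRegistries

	alertsReceived *prometheus.Desc
}

func newPerUserMetrics() *perUserMetrics {
	return &perUserMetrics{
		regs: util.NewUserRegistries(),
		alertsReceived: prometheus.NewDesc(
			"cortex_alertmanager_alerts_received_total",
			"The total number of received alerts.",
			[]string{"user"}, nil),
	}
}

func (m *perUserMetrics) addUserRegistry(user string, reg *prometheus.Registry) {
	m.regs.AddUserRegistry(user, reg)
}

func (m *perUserMetrics) Describe(out chan<- *prometheus.Desc) {
	out <- m.alertsReceived
}

func (m *perUserMetrics) Collect(out chan<- prometheus.Metric) {
	data := m.regs.BuildMetricFamiliesPerUser()
	data.SendSumOfCountersPerUser(out, m.alertsReceived, "alertmanager_alerts_received_total")
}

func main() {
	// Each tenant gets its own registry; the aggregator sums the per-tenant
	// series under one descriptor keyed by the "user" label.
	agg := newPerUserMetrics()

	userReg := prometheus.NewRegistry()
	promauto.With(userReg).NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_alerts_received_total",
		Help: "The total number of received alerts.",
	}).Inc()
	agg.addUserRegistry("tenant-1", userReg)

	root := prometheus.NewRegistry()
	root.MustRegister(agg)
	_, _ = root.Gather()
}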
+ configHashValue *prometheus.Desc } func newAlertmanagerMetrics() *alertmanagerMetrics { return &alertmanagerMetrics{ - regs: map[string]*prometheus.Registry{}, - regsMu: sync.Mutex{}, + regs: util.NewUserRegistries(), alertsReceived: prometheus.NewDesc( "cortex_alertmanager_alerts_received_total", "The total number of received alerts.", @@ -67,6 +67,14 @@ func newAlertmanagerMetrics() *alertmanagerMetrics { "cortex_alertmanager_notifications_failed_total", "The total number of failed notifications.", []string{"user", "integration"}, nil), + numNotificationRequestsTotal: prometheus.NewDesc( + "cortex_alertmanager_notification_requests_total", + "The total number of attempted notification requests.", + []string{"user", "integration"}, nil), + numNotificationRequestsFailedTotal: prometheus.NewDesc( + "cortex_alertmanager_notification_requests_failed_total", + "The total number of failed notification requests.", + []string{"user", "integration"}, nil), notificationLatencySeconds: prometheus.NewDesc( "cortex_alertmanager_notification_latency_seconds", "The latency of notifications in seconds.", @@ -135,25 +143,15 @@ func newAlertmanagerMetrics() *alertmanagerMetrics { "cortex_alertmanager_silences", "How many silences by state.", []string{"user", "state"}, nil), + configHashValue: prometheus.NewDesc( + "cortex_alertmanager_config_hash", + "Hash of the currently loaded alertmanager configuration.", + []string{"user"}, nil), } } func (m *alertmanagerMetrics) addUserRegistry(user string, reg *prometheus.Registry) { - m.regsMu.Lock() - m.regs[user] = reg - m.regsMu.Unlock() -} - -func (m *alertmanagerMetrics) registries() map[string]*prometheus.Registry { - regs := map[string]*prometheus.Registry{} - - m.regsMu.Lock() - defer m.regsMu.Unlock() - for uid, r := range m.regs { - regs[uid] = r - } - - return regs + m.regs.AddUserRegistry(user, reg) } func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { @@ -161,7 +159,10 @@ func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.alertsInvalid out <- m.numNotifications out <- m.numFailedNotifications + out <- m.numNotificationRequestsTotal + out <- m.numNotificationRequestsFailedTotal out <- m.notificationLatencySeconds + out <- m.markerAlerts out <- m.nflogGCDuration out <- m.nflogSnapshotDuration out <- m.nflogSnapshotSize @@ -169,25 +170,27 @@ func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.nflogQueryErrorsTotal out <- m.nflogQueryDuration out <- m.nflogPropagatedMessagesTotal - out <- m.markerAlerts out <- m.silencesGCDuration out <- m.silencesSnapshotDuration out <- m.silencesSnapshotSize out <- m.silencesQueriesTotal out <- m.silencesQueryErrorsTotal out <- m.silencesQueryDuration - out <- m.silences out <- m.silencesPropagatedMessagesTotal + out <- m.silences + out <- m.configHashValue } func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) { - data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.registries()) + data := m.regs.BuildMetricFamiliesPerUser() data.SendSumOfCountersPerUser(out, m.alertsReceived, "alertmanager_alerts_received_total") data.SendSumOfCountersPerUser(out, m.alertsInvalid, "alertmanager_alerts_invalid_total") data.SendSumOfCountersPerUserWithLabels(out, m.numNotifications, "alertmanager_notifications_total", "integration") data.SendSumOfCountersPerUserWithLabels(out, m.numFailedNotifications, "alertmanager_notifications_failed_total", "integration") + data.SendSumOfCountersPerUserWithLabels(out, m.numNotificationRequestsTotal, 
"alertmanager_notification_requests_total", "integration") + data.SendSumOfCountersPerUserWithLabels(out, m.numNotificationRequestsFailedTotal, "alertmanager_notification_requests_failed_total", "integration") data.SendSumOfHistograms(out, m.notificationLatencySeconds, "alertmanager_notification_latency_seconds") data.SendSumOfGaugesPerUserWithLabels(out, m.markerAlerts, "alertmanager_alerts", "state") @@ -207,4 +210,6 @@ func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfHistograms(out, m.silencesQueryDuration, "alertmanager_silences_query_duration_seconds") data.SendSumOfCounters(out, m.silencesPropagatedMessagesTotal, "alertmanager_silences_gossip_messages_propagated_total") data.SendSumOfGaugesPerUserWithLabels(out, m.silences, "alertmanager_silences", "state") + + data.SendMaxOfGaugesPerUser(out, m.configHashValue, "alertmanager_config_hash") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go index 68e75d7556b32..d2d3f09fa6553 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go @@ -8,13 +8,13 @@ import ( "path/filepath" "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/alertmanager/config" "github.com/prometheus/alertmanager/template" - "github.com/weaveworks/common/user" "gopkg.in/yaml.v2" ) @@ -36,7 +36,7 @@ type UserConfig struct { func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http.Request) { logger := util.WithContext(r.Context(), am.logger) - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, err := tenant.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) @@ -73,7 +73,7 @@ func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http. func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http.Request) { logger := util.WithContext(r.Context(), am.logger) - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, err := tenant.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) @@ -114,7 +114,7 @@ func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http. 
func (am *MultitenantAlertmanager) DeleteUserConfig(w http.ResponseWriter, r *http.Request) { logger := util.WithContext(r.Context(), am.logger) - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, err := tenant.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go index 5db09e63eeb14..a203295f4e85d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go @@ -20,9 +20,9 @@ import ( amconfig "github.com/prometheus/alertmanager/config" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" @@ -125,6 +125,14 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) { cfg.Store.RegisterFlags(f) } +// Validate config and returns error on failure +func (cfg *MultitenantAlertmanagerConfig) Validate() error { + if err := cfg.Store.Validate(); err != nil { + return errors.Wrap(err, "invalid storage config") + } + return nil +} + type multitenantAlertmanagerMetrics struct { lastReloadSuccessful *prometheus.GaugeVec lastReloadSuccessfulTimestamp *prometheus.GaugeVec @@ -347,27 +355,6 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi } } -func (am *MultitenantAlertmanager) transformConfig(userID string, amConfig *amconfig.Config) (*amconfig.Config, error) { - if amConfig == nil { // shouldn't happen, but check just in case - return nil, fmt.Errorf("no usable Cortex configuration for %v", userID) - } - if am.cfg.AutoWebhookRoot != "" { - for _, r := range amConfig.Receivers { - for _, w := range r.WebhookConfigs { - if w.URL.String() == autoWebhookURL { - u, err := url.Parse(am.cfg.AutoWebhookRoot + "/" + userID + "/monitor") - if err != nil { - return nil, err - } - w.URL = &amconfig.URL{URL: u} - } - } - } - } - - return amConfig, nil -} - // setConfig applies the given configuration to the alertmanager for `userID`, // creating an alertmanager if it doesn't already exist. func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { @@ -391,6 +378,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User) + rawCfg := cfg.RawConfig if cfg.RawConfig == "" { if am.fallbackConfig == "" { return fmt.Errorf("blank Alertmanager configuration for %v", cfg.User) @@ -400,26 +388,45 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { if err != nil { return fmt.Errorf("unable to load fallback configuration for %v: %v", cfg.User, err) } + rawCfg = am.fallbackConfig } else { userAmConfig, err = amconfig.Load(cfg.RawConfig) if err != nil && hasExisting { - // XXX: This means that if a user has a working configuration and - // they submit a broken one, we'll keep processing the last known - // working configuration, and they'll never know. 
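These handlers now resolve the tenant with tenant.TenantID(r.Context()) instead of parsing the org-ID header directly, returning 401 when no tenant is present. A minimal sketch of a handler using the same call; it assumes the usual authentication middleware has already injected the org ID into the request context, and that tenant.TenantID reads the value set by user.InjectOrgID (used below only to simulate that middleware step).

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/cortexproject/cortex/pkg/tenant"
	"github.com/weaveworks/common/user"
)

// tenantAwareHandler mirrors the handler shape above: resolve the tenant from
// the request context and reject the request if none is present.
func tenantAwareHandler(w http.ResponseWriter, r *http.Request) {
	userID, err := tenant.TenantID(r.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(w, "config for tenant %s\n", userID)
}

func main() {
	// Simulate what the auth middleware normally does: inject the org ID
	// into the request context before the handler runs.
	req := httptest.NewRequest(http.MethodGet, "/api/v1/alerts", nil)
	req = req.WithContext(user.InjectOrgID(req.Context(), "tenant-1"))

	rec := httptest.NewRecorder()
	tenantAwareHandler(rec, req)
	fmt.Println(rec.Code, rec.Body.String())
}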
- // TODO: Provide a way of communicating this to the user and for removing - // Alertmanager instances. + // This means that if a user has a working config and + // they submit a broken one, the Manager will keep running the last known + // working configuration. return fmt.Errorf("invalid Cortex configuration for %v: %v", cfg.User, err) } } - if userAmConfig, err = am.transformConfig(cfg.User, userAmConfig); err != nil { - return err + // We can have an empty configuration here if: + // 1) the user had a previous alertmanager + // 2) then, submitted a non-working configuration (and we kept running the prev working config) + // 3) finally, the cortex AM instance is restarted and the running version is no longer present + if userAmConfig == nil { + return fmt.Errorf("no usable Alertmanager configuration for %v", cfg.User) + } + + // Transform webhook configs URLs to the per tenant monitor + if am.cfg.AutoWebhookRoot != "" { + for i, r := range userAmConfig.Receivers { + for j, w := range r.WebhookConfigs { + if w.URL.String() == autoWebhookURL { + u, err := url.Parse(am.cfg.AutoWebhookRoot + "/" + cfg.User + "/monitor") + if err != nil { + return err + } + + userAmConfig.Receivers[i].WebhookConfigs[j].URL = &amconfig.URL{URL: u} + } + } + } } // If no Alertmanager instance exists for this user yet, start one. if !hasExisting { level.Debug(am.logger).Log("msg", "initializing new per-tenant alertmanager", "user", cfg.User) - newAM, err := am.newAlertmanager(cfg.User, userAmConfig) + newAM, err := am.newAlertmanager(cfg.User, userAmConfig, rawCfg) if err != nil { return err } @@ -429,7 +436,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { } else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges { level.Info(am.logger).Log("msg", "updating new per-tenant alertmanager", "user", cfg.User) // If the config changed, apply the new one. - err := existing.ApplyConfig(cfg.User, userAmConfig) + err := existing.ApplyConfig(cfg.User, userAmConfig, rawCfg) if err != nil { return fmt.Errorf("unable to apply Alertmanager config for user %v: %v", cfg.User, err) } @@ -438,7 +445,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { return nil } -func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amconfig.Config) (*Alertmanager, error) { +func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amconfig.Config, rawCfg string) (*Alertmanager, error) { reg := prometheus.NewRegistry() newAM, err := New(&Config{ UserID: userID, @@ -453,7 +460,7 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco return nil, fmt.Errorf("unable to start Alertmanager for user %v: %v", userID, err) } - if err := newAM.ApplyConfig(userID, amConfig); err != nil { + if err := newAM.ApplyConfig(userID, amConfig, rawCfg); err != nil { return nil, fmt.Errorf("unable to apply initial config for user %v: %v", userID, err) } @@ -463,7 +470,7 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco // ServeHTTP serves the Alertmanager's web UI and API. 
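The setConfig hunk above rewrites the sentinel auto-webhook URL in a tenant's receivers into a per-tenant monitor endpoint rooted at the configured auto-webhook root. A standard-library sketch of that URL rewrite; the sentinel value and root URL below are placeholders, and the real code operates on Alertmanager receiver structs rather than plain strings.

package main

import (
	"fmt"
	"net/url"
)

// rewriteAutoWebhook replaces the sentinel webhook URL with a per-tenant
// monitor endpoint, the same shape the hunk above builds for receiver
// webhook configs.
func rewriteAutoWebhook(raw, autoWebhookRoot, userID, sentinel string) (string, error) {
	if raw != sentinel {
		return raw, nil // leave explicitly configured webhooks alone
	}
	u, err := url.Parse(autoWebhookRoot + "/" + userID + "/monitor")
	if err != nil {
		return "", err
	}
	return u.String(), nil
}

func main() {
	out, err := rewriteAutoWebhook(
		"http://placeholder.sentinel", // sentinel value in the tenant's config (illustrative)
		"http://webhook-root",         // configured auto-webhook root (illustrative)
		"tenant-1",
		"http://placeholder.sentinel",
	)
	fmt.Println(out, err)
}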
func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Request) { - userID, _, err := user.ExtractOrgIDFromHTTPRequest(req) + userID, err := tenant.TenantID(req.Context()) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -474,6 +481,7 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re if ok { if !userAM.IsActive() { + level.Debug(am.logger).Log("msg", "the Alertmanager is not active", "user", userID) http.Error(w, "the Alertmanager is not configured", http.StatusNotFound) return } @@ -485,6 +493,7 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re if am.fallbackConfig != "" { userAM, err = am.alertmanagerFromFallbackConfig(userID) if err != nil { + level.Error(am.logger).Log("msg", "unable to initialize the Alertmanager with a fallback configuration", "user", userID, "err", err) http.Error(w, "Failed to initialize the Alertmanager", http.StatusInternalServerError) return } @@ -493,6 +502,7 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re return } + level.Debug(am.logger).Log("msg", "the Alertmanager has no configuration and no fallback specified", "user", userID) http.Error(w, "the Alertmanager is not configured", http.StatusNotFound) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go index 4ec78890b7c16..21c6c4812d4da 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go @@ -5,6 +5,8 @@ import ( "flag" "fmt" + "github.com/pkg/errors" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb" "github.com/cortexproject/cortex/pkg/alertmanager/alerts/local" @@ -43,6 +45,14 @@ func (cfg *AlertStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.S3.RegisterFlagsWithPrefix("alertmanager.storage.", f) } +// Validate config and returns error on failure +func (cfg *AlertStoreConfig) Validate() error { + if err := cfg.S3.Validate(); err != nil { + return errors.Wrap(err, "invalid S3 Storage config") + } + return nil +} + // NewAlertStore returns a new rule storage backend poller and store func NewAlertStore(cfg AlertStoreConfig) (AlertStore, error) { switch cfg.Type { diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go index 5b04d66022b2f..e174b53c6af85 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go @@ -2,27 +2,17 @@ package api import ( "context" - "errors" "flag" "net/http" - "regexp" "strings" "time" - "github.com/opentracing-contrib/go-stdlib/nethttp" - "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - + "github.com/NYTimes/gziphandler" "github.com/felixge/fgprof" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/gorilla/mux" - "github.com/prometheus/common/route" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/promql" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/storage" - v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" @@ -30,17 +20,24 @@ import ( 
"github.com/cortexproject/cortex/pkg/chunk/purger" "github.com/cortexproject/cortex/pkg/compactor" "github.com/cortexproject/cortex/pkg/distributor" + frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" + "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" + frontendv2 "github.com/cortexproject/cortex/pkg/frontend/v2" + "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/scheduler" + "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" "github.com/cortexproject/cortex/pkg/util/push" ) type Config struct { + ResponseCompression bool `yaml:"response_compression_enabled"` + AlertmanagerHTTPPrefix string `yaml:"alertmanager_http_prefix"` PrometheusHTTPPrefix string `yaml:"prometheus_http_prefix"` @@ -52,6 +49,7 @@ type Config struct { // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&cfg.ResponseCompression, "api.response-compression-enabled", false, "Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression.") cfg.RegisterFlagsWithPrefix("", f) } @@ -62,12 +60,13 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { } type API struct { - cfg Config - authMiddleware middleware.Interface - server *server.Server - logger log.Logger - sourceIPs *middleware.SourceIPExtractor - indexPage *IndexPageContent + AuthMiddleware middleware.Interface + + cfg Config + server *server.Server + logger log.Logger + sourceIPs *middleware.SourceIPExtractor + indexPage *IndexPageContent } func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logger) (*API, error) { @@ -86,7 +85,7 @@ func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logge api := &API{ cfg: cfg, - authMiddleware: cfg.HTTPAuthMiddleware, + AuthMiddleware: cfg.HTTPAuthMiddleware, server: s, logger: logger, sourceIPs: sourceIPs, @@ -95,7 +94,7 @@ func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logge // If no authentication middleware is present in the config, use the default authentication middleware. if cfg.HTTPAuthMiddleware == nil { - api.authMiddleware = middleware.AuthenticateUser + api.AuthMiddleware = middleware.AuthenticateUser } return api, nil @@ -104,30 +103,35 @@ func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logge // RegisterRoute registers a single route enforcing HTTP methods. A single // route is expected to be specific about which HTTP methods are supported. func (a *API) RegisterRoute(path string, handler http.Handler, auth bool, method string, methods ...string) { - a.registerRouteWithRouter(a.server.HTTP, path, handler, auth, method, methods...) -} - -// RegisterRoute registers a single route to a router, enforcing HTTP methods. A single -// route is expected to be specific about which HTTP methods are supported. -func (a *API) registerRouteWithRouter(router *mux.Router, path string, handler http.Handler, auth bool, method string, methods ...string) { methods = append([]string{method}, methods...) 
level.Debug(a.logger).Log("msg", "api: registering route", "methods", strings.Join(methods, ","), "path", path, "auth", auth) + if auth { - handler = a.authMiddleware.Wrap(handler) + handler = a.AuthMiddleware.Wrap(handler) } + + if a.cfg.ResponseCompression { + handler = gziphandler.GzipHandler(handler) + } + if len(methods) == 0 { - router.Path(path).Handler(handler) + a.server.HTTP.Path(path).Handler(handler) return } - router.Path(path).Methods(methods...).Handler(handler) + a.server.HTTP.Path(path).Methods(methods...).Handler(handler) } func (a *API) RegisterRoutesWithPrefix(prefix string, handler http.Handler, auth bool, methods ...string) { level.Debug(a.logger).Log("msg", "api: registering route", "methods", strings.Join(methods, ","), "prefix", prefix, "auth", auth) if auth { - handler = a.authMiddleware.Wrap(handler) + handler = a.AuthMiddleware.Wrap(handler) } + + if a.cfg.ResponseCompression { + handler = gziphandler.GzipHandler(handler) + } + if len(methods) == 0 { a.server.HTTP.PathPrefix(prefix).Handler(handler) return @@ -135,20 +139,6 @@ func (a *API) RegisterRoutesWithPrefix(prefix string, handler http.Handler, auth a.server.HTTP.PathPrefix(prefix).Methods(methods...).Handler(handler) } -// Latest Prometheus requires r.RemoteAddr to be set to addr:port, otherwise it reject the request. -// Requests to Querier sometimes doesn't have that (if they are fetched from Query-Frontend). -// Prometheus uses this when logging queries to QueryLogger, but Cortex doesn't call engine.SetQueryLogger to set one. -// -// Can be removed when (if) https://github.com/prometheus/prometheus/pull/6840 is merged. -func fakeRemoteAddr(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RemoteAddr == "" { - r.RemoteAddr = "127.0.0.1:8888" - } - handler.ServeHTTP(w, r) - }) -} - // RegisterAlertmanager registers endpoints associated with the alertmanager. It will only // serve endpoints using the legacy http-prefix if it is not run as a single binary. func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target, apiEnabled bool) { @@ -200,9 +190,9 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET") } -// ingester is defined as an interface to allow for alternative implementations +// Ingester is defined as an interface to allow for alternative implementations // of ingesters to be passed into the API.RegisterIngester() method. -type ingester interface { +type Ingester interface { client.IngesterServer FlushHandler(http.ResponseWriter, *http.Request) ShutdownHandler(http.ResponseWriter, *http.Request) @@ -210,7 +200,7 @@ type ingester interface { } // RegisterIngester registers the ingesters HTTP and GRPC service -func (a *API) RegisterIngester(i ingester, pushConfig distributor.Config) { +func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { client.RegisterIngesterServer(a.server.GRPC, i) a.indexPage.AddLink(SectionDangerous, "/ingester/flush", "Trigger a Flush of data from Ingester to storage") @@ -225,10 +215,10 @@ func (a *API) RegisterIngester(i ingester, pushConfig distributor.Config) { a.RegisterRoute("/push", push.Handler(pushConfig, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. } -// RegisterPurger registers the endpoints associated with the Purger/DeleteStore. They do not exactly +// RegisterChunksPurger registers the endpoints associated with the Purger/DeleteStore. 
They do not exactly // match the Prometheus API but mirror it closely enough to justify their routing under the Prometheus // component/ -func (a *API) RegisterPurger(store *purger.DeleteStore, deleteRequestCancelPeriod time.Duration) { +func (a *API) RegisterChunksPurger(store *purger.DeleteStore, deleteRequestCancelPeriod time.Duration) { deleteRequestHandler := purger.NewDeleteRequestHandler(store, deleteRequestCancelPeriod, prometheus.DefaultRegisterer) a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/admin/tsdb/delete_series", http.HandlerFunc(deleteRequestHandler.AddDeleteRequestHandler), true, "PUT", "POST") @@ -241,6 +231,11 @@ func (a *API) RegisterPurger(store *purger.DeleteStore, deleteRequestCancelPerio a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/admin/tsdb/cancel_delete_request", http.HandlerFunc(deleteRequestHandler.CancelDeleteRequestHandler), true, "PUT", "POST") } +func (a *API) RegisterBlocksPurger(api *purger.BlocksPurgerAPI) { + a.RegisterRoute("/purger/delete_tenant", http.HandlerFunc(api.DeleteTenant), true, "POST") + a.RegisterRoute("/purger/delete_tenant_status", http.HandlerFunc(api.DeleteTenantStatus), true, "GET") +} + // RegisterRuler registers routes associated with the Ruler service. func (a *API) RegisterRuler(r *ruler.Ruler) { a.indexPage.AddLink(SectionAdminEndpoints, "/ruler/ring", "Ruler Ring Status") @@ -302,114 +297,22 @@ func (a *API) RegisterCompactor(c *compactor.Compactor) { a.RegisterRoute("/compactor/ring", http.HandlerFunc(c.RingHandler), false, "GET", "POST") } -// RegisterQuerier registers the Prometheus routes supported by the -// Cortex querier service. Currently this can not be registered simultaneously -// with the QueryFrontend. -func (a *API) RegisterQuerier( +// RegisterQueryable registers the the default routes associated with the querier +// module. +func (a *API) RegisterQueryable( queryable storage.SampleAndChunkQueryable, - engine *promql.Engine, distributor *distributor.Distributor, - registerRoutesExternally bool, - tombstonesLoader *purger.TombstonesLoader, - querierRequestDuration *prometheus.HistogramVec, - receivedMessageSize *prometheus.HistogramVec, - sentMessageSize *prometheus.HistogramVec, - inflightRequests *prometheus.GaugeVec, -) http.Handler { - api := v1.NewAPI( - engine, - errorTranslateQueryable{queryable}, // Translate errors to errors expected by API. - func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, - func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} }, - func() config.Config { return config.Config{} }, - map[string]string{}, // TODO: include configuration flags - v1.GlobalURLOptions{}, - func(f http.HandlerFunc) http.HandlerFunc { return f }, - nil, // Only needed for admin APIs. - "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. - false, // Disable admin APIs. - a.logger, - func(context.Context) v1.RulesRetriever { return &querier.DummyRulesRetriever{} }, - 0, 0, 0, // Remote read samples and concurrency limit. - regexp.MustCompile(".*"), - func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, - &v1.PrometheusVersion{}, - // This is used for the stats API which we should not support. Or find other ways to. 
- prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }), - ) - +) { // these routes are always registered to the default server a.RegisterRoute("/api/v1/user_stats", http.HandlerFunc(distributor.UserStatsHandler), true, "GET") a.RegisterRoute("/api/v1/chunks", querier.ChunksHandler(queryable), true, "GET") a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/user_stats", http.HandlerFunc(distributor.UserStatsHandler), true, "GET") a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/chunks", querier.ChunksHandler(queryable), true, "GET") - - // these routes are either registered the default server OR to an internal mux. The internal mux is - // for use in a single binary mode when both the query frontend and the querier would attempt to claim these routes - // TODO: Add support to expose querier paths with a configurable prefix in single binary mode. - router := mux.NewRouter() - if registerRoutesExternally { - router = a.server.HTTP - } - - // Use a separate metric for the querier in order to differentiate requests from the query-frontend when - // running Cortex as a single binary. - inst := middleware.Instrument{ - RouteMatcher: router, - Duration: querierRequestDuration, - RequestBodySize: receivedMessageSize, - ResponseBodySize: sentMessageSize, - InflightRequests: inflightRequests, - } - - promRouter := route.New().WithPrefix(a.cfg.ServerPrefix + a.cfg.PrometheusHTTPPrefix + "/api/v1") - api.Register(promRouter) - cacheGenHeaderMiddleware := getHTTPCacheGenNumberHeaderSetterMiddleware(tombstonesLoader) - promHandler := fakeRemoteAddr(inst.Wrap(cacheGenHeaderMiddleware.Wrap(promRouter))) - - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/read", querier.RemoteReadHandler(queryable), true, "POST") - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/query", promHandler, true, "GET", "POST") - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/query_range", promHandler, true, "GET", "POST") - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/labels", promHandler, true, "GET", "POST") - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/label/{name}/values", promHandler, true, "GET") - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/series", promHandler, true, "GET", "POST", "DELETE") - //TODO(gotjosh): This custom handler is temporary until we're able to vendor the changes in: - // https://github.com/prometheus/prometheus/pull/7125/files - a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/metadata", querier.MetadataHandler(distributor), true, "GET") - - legacyPromRouter := route.New().WithPrefix(a.cfg.ServerPrefix + a.cfg.LegacyHTTPPrefix + "/api/v1") - api.Register(legacyPromRouter) - legacyPromHandler := fakeRemoteAddr(inst.Wrap(cacheGenHeaderMiddleware.Wrap(legacyPromRouter))) - - a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/read", querier.RemoteReadHandler(queryable), true, "POST") - a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/query", legacyPromHandler, true, "GET", "POST") - a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/query_range", legacyPromHandler, true, "GET", "POST") - a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/labels", legacyPromHandler, true, "GET", "POST") - a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/label/{name}/values", legacyPromHandler, true, "GET") - a.registerRouteWithRouter(router, 
a.cfg.LegacyHTTPPrefix+"/api/v1/series", legacyPromHandler, true, "GET", "POST", "DELETE") - //TODO(gotjosh): This custom handler is temporary until we're able to vendor the changes in: - // https://github.com/prometheus/prometheus/pull/7125/files - a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/metadata", querier.MetadataHandler(distributor), true, "GET") - - // if we have externally registered routes then we need to return the server handler - // so that we continue to use all standard middleware - if registerRoutesExternally { - return a.server.HTTPServer.Handler - } - - // Since we have a new router and the request will not go trough the default server - // HTTP middleware stack, we need to add a middleware to extract the trace context - // from the HTTP headers and inject it into the Go context. - return nethttp.MiddlewareFunc(opentracing.GlobalTracer(), router.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string { - return "internalQuerier" - })) } -// registerQueryAPI registers the Prometheus routes supported by the -// Cortex querier service. Currently this can not be registered simultaneously -// with the Querier. -func (a *API) registerQueryAPI(handler http.Handler) { +// RegisterQueryAPI registers the Prometheus API routes with the provided handler. +func (a *API) RegisterQueryAPI(handler http.Handler) { a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/read", handler, true, "POST") a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/query", handler, true, "GET", "POST") a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/query_range", handler, true, "GET", "POST") @@ -431,9 +334,21 @@ func (a *API) registerQueryAPI(handler http.Handler) { // RegisterQueryFrontend registers the Prometheus routes supported by the // Cortex querier service. Currently this can not be registered simultaneously // with the Querier. 
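Because the querier's routes can be served from a router that bypasses the server's default HTTP middleware stack, the trace context has to be extracted from the HTTP headers explicitly with the opentracing nethttp middleware, as the removed hunk above does and the new querier handler continues to do. A minimal sketch of wiring that up; the route and operation name are illustrative.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
	"github.com/opentracing-contrib/go-stdlib/nethttp"
	"github.com/opentracing/opentracing-go"
)

func main() {
	// A router served outside the main server's middleware stack must pull
	// the trace context out of the incoming headers itself.
	router := mux.NewRouter()
	router.Path("/api/v1/query").Methods("GET", "POST").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "{}")
	})

	handler := nethttp.MiddlewareFunc(
		opentracing.GlobalTracer(),
		router.ServeHTTP,
		nethttp.OperationNameFunc(func(r *http.Request) string { return "internalQuerier" }),
	)

	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/v1/query", nil))
	fmt.Println(rec.Code)
}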
-func (a *API) RegisterQueryFrontend(f *frontend.Frontend) { - frontend.RegisterFrontendServer(a.server.GRPC, f) - a.registerQueryAPI(f.Handler()) +func (a *API) RegisterQueryFrontendHandler(h http.Handler) { + a.RegisterQueryAPI(h) +} + +func (a *API) RegisterQueryFrontend1(f *frontendv1.Frontend) { + frontendv1pb.RegisterFrontendServer(a.server.GRPC, f) +} + +func (a *API) RegisterQueryFrontend2(f *frontendv2.Frontend) { + frontendv2pb.RegisterFrontendForQuerierServer(a.server.GRPC, f) +} + +func (a *API) RegisterQueryScheduler(f *scheduler.Scheduler) { + schedulerpb.RegisterSchedulerForFrontendServer(a.server.GRPC, f) + schedulerpb.RegisterSchedulerForQuerierServer(a.server.GRPC, f) } // RegisterServiceMapHandler registers the Cortex structs service handler diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go index 665d220b3964f..9afffcc873029 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go @@ -1,14 +1,35 @@ package api import ( + "context" "html/template" "net/http" "path" + "regexp" "sync" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/gorilla/mux" + "github.com/opentracing-contrib/go-stdlib/nethttp" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/route" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + v1 "github.com/prometheus/prometheus/web/api/v1" + "github.com/weaveworks/common/instrument" + "github.com/weaveworks/common/middleware" "gopkg.in/yaml.v2" + "github.com/cortexproject/cortex/pkg/chunk/purger" + "github.com/cortexproject/cortex/pkg/distributor" + "github.com/cortexproject/cortex/pkg/querier" + "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/util" ) @@ -57,7 +78,7 @@ func (pc *IndexPageContent) GetContent() map[string]map[string]string { return result } -var indexPageTemplate = ` +var indexPageTemplate = ` @@ -109,3 +130,123 @@ func configHandler(cfg interface{}) http.HandlerFunc { } } } + +// NewQuerierHandler returns a HTTP handler that can be used by the querier service to +// either register with the frontend worker query processor or with the external HTTP +// server to fulfill the Prometheus query API. +func NewQuerierHandler( + cfg Config, + queryable storage.SampleAndChunkQueryable, + engine *promql.Engine, + distributor *distributor.Distributor, + tombstonesLoader *purger.TombstonesLoader, + reg prometheus.Registerer, + logger log.Logger, +) http.Handler { + // Prometheus histograms for requests to the querier. 
+ querierRequestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "querier_request_duration_seconds", + Help: "Time (in seconds) spent serving HTTP requests to the querier.", + Buckets: instrument.DefBuckets, + }, []string{"method", "route", "status_code", "ws"}) + + receivedMessageSize := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "querier_request_message_bytes", + Help: "Size (in bytes) of messages received in the request to the querier.", + Buckets: middleware.BodySizeBuckets, + }, []string{"method", "route"}) + + sentMessageSize := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "querier_response_message_bytes", + Help: "Size (in bytes) of messages sent in response by the querier.", + Buckets: middleware.BodySizeBuckets, + }, []string{"method", "route"}) + + inflightRequests := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "querier_inflight_requests", + Help: "Current number of inflight requests to the querier.", + }, []string{"method", "route"}) + + api := v1.NewAPI( + engine, + errorTranslateQueryable{queryable}, // Translate errors to errors expected by API. + func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, + func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} }, + func() config.Config { return config.Config{} }, + map[string]string{}, // TODO: include configuration flags + v1.GlobalURLOptions{}, + func(f http.HandlerFunc) http.HandlerFunc { return f }, + nil, // Only needed for admin APIs. + "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. + false, // Disable admin APIs. + logger, + func(context.Context) v1.RulesRetriever { return &querier.DummyRulesRetriever{} }, + 0, 0, 0, // Remote read samples and concurrency limit. + regexp.MustCompile(".*"), + func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, + &v1.PrometheusVersion{}, + // This is used for the stats API which we should not support. Or find other ways to. + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }), + ) + + router := mux.NewRouter() + + // Use a separate metric for the querier in order to differentiate requests from the query-frontend when + // running Cortex as a single binary. 
+ inst := middleware.Instrument{ + RouteMatcher: router, + Duration: querierRequestDuration, + RequestBodySize: receivedMessageSize, + ResponseBodySize: sentMessageSize, + InflightRequests: inflightRequests, + } + cacheGenHeaderMiddleware := getHTTPCacheGenNumberHeaderSetterMiddleware(tombstonesLoader) + middlewares := middleware.Merge(inst, cacheGenHeaderMiddleware) + router.Use(middlewares.Wrap) + + // Define the prefixes for all routes + prefix := cfg.ServerPrefix + cfg.PrometheusHTTPPrefix + legacyPrefix := cfg.ServerPrefix + cfg.LegacyHTTPPrefix + + promRouter := route.New().WithPrefix(prefix + "/api/v1") + api.Register(promRouter) + + legacyPromRouter := route.New().WithPrefix(legacyPrefix + "/api/v1") + api.Register(legacyPromRouter) + + // TODO(gotjosh): This custom handler is temporary until we're able to vendor the changes in: + // https://github.com/prometheus/prometheus/pull/7125/files + router.Path(prefix + "/api/v1/metadata").Handler(querier.MetadataHandler(distributor)) + router.Path(prefix + "/api/v1/read").Handler(querier.RemoteReadHandler(queryable)) + router.Path(prefix + "/api/v1/read").Methods("POST").Handler(promRouter) + router.Path(prefix+"/api/v1/query").Methods("GET", "POST").Handler(promRouter) + router.Path(prefix+"/api/v1/query_range").Methods("GET", "POST").Handler(promRouter) + router.Path(prefix+"/api/v1/labels").Methods("GET", "POST").Handler(promRouter) + router.Path(prefix + "/api/v1/label/{name}/values").Methods("GET").Handler(promRouter) + router.Path(prefix+"/api/v1/series").Methods("GET", "POST", "DELETE").Handler(promRouter) + router.Path(prefix + "/api/v1/metadata").Methods("GET").Handler(promRouter) + + // TODO(gotjosh): This custom handler is temporary until we're able to vendor the changes in: + // https://github.com/prometheus/prometheus/pull/7125/files + router.Path(legacyPrefix + "/api/v1/metadata").Handler(querier.MetadataHandler(distributor)) + router.Path(legacyPrefix + "/api/v1/read").Handler(querier.RemoteReadHandler(queryable)) + router.Path(legacyPrefix + "/api/v1/read").Methods("POST").Handler(legacyPromRouter) + router.Path(legacyPrefix+"/api/v1/query").Methods("GET", "POST").Handler(legacyPromRouter) + router.Path(legacyPrefix+"/api/v1/query_range").Methods("GET", "POST").Handler(legacyPromRouter) + router.Path(legacyPrefix+"/api/v1/labels").Methods("GET", "POST").Handler(legacyPromRouter) + router.Path(legacyPrefix + "/api/v1/label/{name}/values").Methods("GET").Handler(legacyPromRouter) + router.Path(legacyPrefix+"/api/v1/series").Methods("GET", "POST", "DELETE").Handler(legacyPromRouter) + router.Path(legacyPrefix + "/api/v1/metadata").Methods("GET").Handler(legacyPromRouter) + + // Add a middleware to extract the trace context and add a header. + handler := nethttp.MiddlewareFunc(opentracing.GlobalTracer(), router.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string { + return "internalQuerier" + })) + + // Track execution time. 
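NewQuerierHandler chains the instrumentation and cache-generation-header middlewares onto the router with middleware.Merge and router.Use, then wraps the final handler to track execution time. A small sketch of the same chaining using the weaveworks middleware helpers seen in this patch; the timing middleware below is a simplified stand-in for the stats wall-time middleware, not its actual implementation.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"

	"github.com/gorilla/mux"
	"github.com/weaveworks/common/middleware"
)

func main() {
	router := mux.NewRouter()
	router.Path("/api/v1/query").Methods("GET", "POST").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "{}")
	})

	// Two toy middlewares standing in for the instrumentation and
	// cache-generation-header middlewares merged above.
	addHeader := middleware.Func(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("X-Example", "1")
			next.ServeHTTP(w, r)
		})
	})
	timeRequests := middleware.Func(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			start := time.Now()
			next.ServeHTTP(w, r)
			fmt.Println("wall time:", time.Since(start)) // simplified stand-in for the stats wall-time tracking
		})
	})

	merged := middleware.Merge(addHeader, timeRequests)
	router.Use(merged.Wrap)

	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/v1/query", nil))
	fmt.Println(rec.Code, rec.Header().Get("X-Example"))
}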
+ return stats.NewWallTimeMiddleware().Wrap(handler) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go b/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go index 3a4dfbafef668..f7ec9d6a819a3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go @@ -4,17 +4,17 @@ import ( "net/http" "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk/purger" "github.com/cortexproject/cortex/pkg/querier/queryrange" + "github.com/cortexproject/cortex/pkg/tenant" ) // middleware for setting cache gen header to let consumer of response know all previous responses could be invalid due to delete operation func getHTTPCacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader *purger.TombstonesLoader) middleware.Interface { return middleware.Func(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userID, err := user.ExtractOrgID(r.Context()) + userID, err := tenant.TenantID(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index 345a4feed1edf..6b47005052650 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -87,6 +87,14 @@ func (cfg *StorageConfig) RegisterFlags(f *flag.FlagSet) { cfg.S3Config.RegisterFlags(f) } +// Validate config and returns error on failure +func (cfg *StorageConfig) Validate() error { + if err := cfg.S3Config.Validate(); err != nil { + return errors.Wrap(err, "invalid S3 Storage config") + } + return nil +} + type dynamoDBStorageClient struct { cfg DynamoDBConfig schemaCfg chunk.SchemaConfig diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index 718384d79a7c2..db4bafcf38413 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "flag" + "fmt" "hash/fnv" "io" "net" @@ -14,18 +15,32 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" + v4 "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/minio/minio-go/v7/pkg/signer" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" awscommon "github.com/weaveworks/common/aws" "github.com/weaveworks/common/instrument" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) +const ( + SignatureVersionV4 = "v4" + SignatureVersionV2 = "v2" +) + +var ( + supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2} + errUnsupportedSignatureVersion = errors.New("unsupported signature version") +) + var ( s3RequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", @@ -48,14 
+63,15 @@ type S3Config struct { S3 flagext.URLValue S3ForcePathStyle bool - BucketNames string - Endpoint string `yaml:"endpoint"` - Region string `yaml:"region"` - AccessKeyID string `yaml:"access_key_id"` - SecretAccessKey string `yaml:"secret_access_key"` - Insecure bool `yaml:"insecure"` - SSEEncryption bool `yaml:"sse_encryption"` - HTTPConfig HTTPConfig `yaml:"http_config"` + BucketNames string + Endpoint string `yaml:"endpoint"` + Region string `yaml:"region"` + AccessKeyID string `yaml:"access_key_id"` + SecretAccessKey string `yaml:"secret_access_key"` + Insecure bool `yaml:"insecure"` + SSEEncryption bool `yaml:"sse_encryption"` + HTTPConfig HTTPConfig `yaml:"http_config"` + SignatureVersion string `yaml:"signature_version"` Inject InjectRequestMiddleware `yaml:"-"` } @@ -89,6 +105,15 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.DurationVar(&cfg.HTTPConfig.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The maximum amount of time an idle connection will be held open.") f.DurationVar(&cfg.HTTPConfig.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 0, "If non-zero, specifies the amount of time to wait for a server's response headers after fully writing the request.") f.BoolVar(&cfg.HTTPConfig.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "Set to false to skip verifying the certificate chain and hostname.") + f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. Supported values are: %s.", strings.Join(supportedSignatureVersions, ", "))) +} + +// Validate config and returns error on failure +func (cfg *S3Config) Validate() error { + if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) { + return errUnsupportedSignatureVersion + } + return nil } type S3ObjectClient struct { @@ -111,6 +136,10 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { s3Client := s3.New(sess) + if cfg.SignatureVersion == SignatureVersionV2 { + s3Client.Handlers.Sign.Swap(v4.SignRequestHandler.Name, v2SignRequestHandler(cfg)) + } + var sseEncryption *string if cfg.SSEEncryption { sseEncryption = aws.String("AES256") @@ -124,6 +153,28 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { return &client, nil } +func v2SignRequestHandler(cfg S3Config) request.NamedHandler { + return request.NamedHandler{ + Name: "v2.SignRequestHandler", + Fn: func(req *request.Request) { + credentials, err := req.Config.Credentials.GetWithContext(req.Context()) + if err != nil { + if err != nil { + req.Error = err + return + } + } + + req.HTTPRequest = signer.SignV2( + *req.HTTPRequest, + credentials.AccessKeyID, + credentials.SecretAccessKey, + !cfg.S3ForcePathStyle, + ) + }, + } +} + func buildS3Config(cfg S3Config) (*aws.Config, []string, error) { var s3Config *aws.Config var err error @@ -137,7 +188,6 @@ func buildS3Config(cfg S3Config) (*aws.Config, []string, error) { } else { s3Config = &aws.Config{} s3Config = s3Config.WithRegion("dummy") - s3Config = s3Config.WithCredentials(credentials.AnonymousCredentials) } s3Config = s3Config.WithMaxRetries(0) // We do our own retries, so we can monitor them @@ -317,11 +367,14 @@ func (a *S3ObjectClient) List(ctx context.Context, prefix, delimiter string) ([] commonPrefixes = append(commonPrefixes, chunk.StorageCommonPrefix(aws.StringValue(commonPrefix.Prefix))) } - if !*output.IsTruncated { + if output.IsTruncated == nil || 
!*output.IsTruncated { // No more results to fetch break } - + if output.NextContinuationToken == nil { + // No way to continue + break + } input.SetContinuationToken(*output.NextContinuationToken) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index a509c5bfb6e44..1e638f6091f0f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -34,6 +34,8 @@ type Config struct { SSL bool `yaml:"SSL"` HostVerification bool `yaml:"host_verification"` CAPath string `yaml:"CA_path"` + CertPath string `yaml:"tls_cert_path"` + KeyPath string `yaml:"tls_key_path"` Auth bool `yaml:"auth"` Username string `yaml:"username"` Password flagext.Secret `yaml:"password"` @@ -62,6 +64,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.SSL, "cassandra.ssl", false, "Use SSL when connecting to cassandra instances.") f.BoolVar(&cfg.HostVerification, "cassandra.host-verification", true, "Require SSL certificate validation.") f.StringVar(&cfg.CAPath, "cassandra.ca-path", "", "Path to certificate file to verify the peer.") + f.StringVar(&cfg.CertPath, "cassandra.tls-cert-path", "", "Path to certificate file used by TLS.") + f.StringVar(&cfg.KeyPath, "cassandra.tls-key-path", "", "Path to private key file used by TLS.") f.BoolVar(&cfg.Auth, "cassandra.auth", false, "Enable password authentication when connecting to cassandra.") f.StringVar(&cfg.Username, "cassandra.username", "", "Username to use when connecting to cassandra.") f.Var(&cfg.Password, "cassandra.password", "Password to use when connecting to cassandra.") @@ -86,6 +90,12 @@ func (cfg *Config) Validate() error { if cfg.SSL && cfg.HostVerification && len(strings.Split(cfg.Addresses, ",")) != 1 { return errors.Errorf("Host verification is only possible for a single host.") } + if cfg.SSL && cfg.CertPath != "" && cfg.KeyPath == "" { + return errors.Errorf("TLS certificate specified, but private key configuration is missing.") + } + if cfg.SSL && cfg.KeyPath != "" && cfg.CertPath == "" { + return errors.Errorf("TLS private key specified, but certificate configuration is missing.") + } return nil } @@ -144,17 +154,29 @@ func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error { cluster.DisableInitialHostLookup = cfg.DisableInitialHostLookup if cfg.SSL { + tlsConfig := &tls.Config{} + + if cfg.CertPath != "" { + cert, err := tls.LoadX509KeyPair(cfg.CertPath, cfg.KeyPath) + if err != nil { + return errors.Wrap(err, "Unable to load TLS certificate and private key") + } + + tlsConfig.Certificates = []tls.Certificate{cert} + } + if cfg.HostVerification { + tlsConfig.ServerName = strings.Split(cfg.Addresses, ",")[0] + cluster.SslOpts = &gocql.SslOptions{ CaPath: cfg.CAPath, EnableHostVerification: true, - Config: &tls.Config{ - ServerName: strings.Split(cfg.Addresses, ",")[0], - }, + Config: tlsConfig, } } else { cluster.SslOpts = &gocql.SslOptions{ EnableHostVerification: false, + Config: tlsConfig, } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go index d52acf4bc4dee..b9fde2bd7f259 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go @@ -20,15 +20,12 @@ import ( "github.com/cortexproject/cortex/pkg/prom1/storage/metric" ) -// 
Errors that decode can return const ( - ErrInvalidChecksum = errs.Error("invalid chunk checksum") - ErrWrongMetadata = errs.Error("wrong chunk metadata") - ErrMetadataLength = errs.Error("chunk metadata wrong length") - ErrDataLength = errs.Error("chunk data wrong length") - ErrSliceOutOfRange = errs.Error("chunk can't be sliced out of its data range") - ErrSliceNoDataInRange = errs.Error("chunk has no data for given range to slice") - ErrSliceChunkOverflow = errs.Error("slicing should not overflow a chunk") + ErrInvalidChecksum = errs.Error("invalid chunk checksum") + ErrWrongMetadata = errs.Error("wrong chunk metadata") + ErrMetadataLength = errs.Error("chunk metadata wrong length") + ErrDataLength = errs.Error("chunk data wrong length") + ErrSliceOutOfRange = errs.Error("chunk can't be sliced out of its data range") ) var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) @@ -186,8 +183,14 @@ var writerPool = sync.Pool{ // Encode writes the chunk into a buffer, and calculates the checksum. func (c *Chunk) Encode() error { - var buf bytes.Buffer + return c.EncodeTo(nil) +} +// EncodeTo is like Encode but you can provide your own buffer to use. +func (c *Chunk) EncodeTo(buf *bytes.Buffer) error { + if buf == nil { + buf = bytes.NewBuffer(nil) + } // Write 4 empty bytes first - we will come back and put the len in here. metadataLenBytes := [4]byte{} if _, err := buf.Write(metadataLenBytes[:]); err != nil { @@ -197,7 +200,7 @@ func (c *Chunk) Encode() error { // Encode chunk metadata into snappy-compressed buffer writer := writerPool.Get().(*snappy.Writer) defer writerPool.Put(writer) - writer.Reset(&buf) + writer.Reset(buf) json := jsoniter.ConfigFastest if err := json.NewEncoder(writer).Encode(c); err != nil { return err @@ -217,7 +220,7 @@ func (c *Chunk) Encode() error { } // And now the chunk data - if err := c.Data.Marshal(&buf); err != nil { + if err := c.Data.Marshal(buf); err != nil { return err } @@ -338,39 +341,11 @@ func (c *Chunk) Slice(from, through model.Time) (*Chunk, error) { return nil, ErrSliceOutOfRange } - itr := c.Data.NewIterator(nil) - if !itr.FindAtOrAfter(from) { - return nil, ErrSliceNoDataInRange - } - - pc, err := prom_chunk.NewForEncoding(c.Data.Encoding()) + pc, err := c.Data.Rebound(from, through) if err != nil { return nil, err } - for !itr.Value().Timestamp.After(through) { - oc, err := pc.Add(itr.Value()) - if err != nil { - return nil, err - } - - if oc != nil { - return nil, ErrSliceChunkOverflow - } - if !itr.Scan() { - break - } - } - - err = itr.Err() - if err != nil { - return nil, err - } - - if pc.Len() == 0 { - return nil, ErrSliceNoDataInRange - } - nc := NewChunk(c.UserID, c.Fingerprint, c.Metric, pc, from, through) return &nc, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 2352938a80073..0c212abb2fe99 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -16,8 +17,10 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/cortexproject/cortex/pkg/chunk/cache" + "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" + "github.com/cortexproject/cortex/pkg/util/flagext" 
"github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -74,11 +77,16 @@ func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f) f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 0 to disable.") - f.Var(&cfg.MaxLookBackPeriod, "store.max-look-back-period", "Limit how long back data can be queried") + f.Var(&cfg.MaxLookBackPeriod, "store.max-look-back-period", "Deprecated: use -querier.max-query-lookback instead. Limit how long back data can be queried. This setting applies to chunks storage only.") // To be removed in Cortex 1.8. } // Validate validates the store config. -func (cfg *StoreConfig) Validate() error { +func (cfg *StoreConfig) Validate(logger log.Logger) error { + if cfg.MaxLookBackPeriod > 0 { + flagext.DeprecatedFlagsUsed.Inc() + level.Warn(logger).Log("msg", "running with DEPRECATED flag -store.max-look-back-period, use -querier.max-query-lookback instead.") + } + if err := cfg.ChunkCacheConfig.Validate(); err != nil { return err } @@ -246,7 +254,7 @@ func (c *baseStore) LabelValuesForMetricName(ctx context.Context, userID string, var result UniqueStrings for _, entry := range entries { - _, labelValue, _, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) + _, labelValue, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) if err != nil { return nil, err } @@ -450,7 +458,8 @@ func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, fro } func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]IndexQuery) []IndexQuery) ([]string, error) { - log, ctx := spanlogger.New(ctx, "Store.lookupIdsByMetricNameMatcher", "metricName", metricName, "matcher", formatMatcher(matcher)) + formattedMatcher := formatMatcher(matcher) + log, ctx := spanlogger.New(ctx, "Store.lookupIdsByMetricNameMatcher", "metricName", metricName, "matcher", formattedMatcher) defer log.Span.Finish() var err error @@ -468,11 +477,11 @@ func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, thro if err != nil { return nil, err } - level.Debug(log).Log("matcher", formatMatcher(matcher), "queries", len(queries)) + level.Debug(log).Log("matcher", formattedMatcher, "queries", len(queries)) if filter != nil { queries = filter(queries) - level.Debug(log).Log("matcher", formatMatcher(matcher), "filteredQueries", len(queries)) + level.Debug(log).Log("matcher", formattedMatcher, "filteredQueries", len(queries)) } entries, err := c.lookupEntriesByQueries(ctx, queries) @@ -483,13 +492,13 @@ func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, thro } else if err != nil { return nil, err } - level.Debug(log).Log("matcher", formatMatcher(matcher), "entries", len(entries)) + level.Debug(log).Log("matcher", formattedMatcher, "entries", len(entries)) ids, err := c.parseIndexEntries(ctx, entries, matcher) if err != nil { return nil, err } - level.Debug(log).Log("matcher", formatMatcher(matcher), "ids", len(ids)) + level.Debug(log).Log("matcher", formattedMatcher, "ids", len(ids)) return ids, nil } @@ -551,7 +560,7 @@ func (c *baseStore) parseIndexEntries(_ context.Context, entries []IndexEntry, m result := make([]string, 0, len(entries)) for _, entry := range entries { - chunkKey, labelValue, _, err := 
parseChunkTimeRangeValue(entry.RangeValue, entry.Value) + chunkKey, labelValue, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value) if err != nil { return nil, err } @@ -677,7 +686,7 @@ func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, pa var newChunks []*Chunk if partiallyDeletedInterval.Start > chunk.From { newChunk, err := chunk.Slice(chunk.From, partiallyDeletedInterval.Start-1) - if err != nil && err != ErrSliceNoDataInRange { + if err != nil && err != encoding.ErrSliceNoDataInRange { return errors.Wrapf(err, "when slicing chunk for interval %d - %d", chunk.From, partiallyDeletedInterval.Start-1) } @@ -688,7 +697,7 @@ func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, pa if partiallyDeletedInterval.End < chunk.Through { newChunk, err := chunk.Slice(partiallyDeletedInterval.End+1, chunk.Through) - if err != nil && err != ErrSliceNoDataInRange { + if err != nil && err != encoding.ErrSliceNoDataInRange { return errors.Wrapf(err, "when slicing chunk for interval %d - %d", partiallyDeletedInterval.End+1, chunk.Through) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go index 8683ebc5a00bc..c05defedb172c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go @@ -210,6 +210,10 @@ func (b *bigchunk) Slice(start, end model.Time) Chunk { } } +func (b *bigchunk) Rebound(start, end model.Time) (Chunk, error) { + return reboundChunk(b, start, end) +} + type writer struct { io.Writer } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go index b31304714d1b7..97c95e41a7736 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go @@ -22,12 +22,18 @@ import ( "sort" "github.com/prometheus/common/model" + errs "github.com/weaveworks/common/errors" "github.com/cortexproject/cortex/pkg/prom1/storage/metric" ) -// ChunkLen is the length of a chunk in bytes. -const ChunkLen = 1024 +const ( + // ChunkLen is the length of a chunk in bytes. + ChunkLen = 1024 + + ErrSliceNoDataInRange = errs.Error("chunk has no data for given range to slice") + ErrSliceChunkOverflow = errs.Error("slicing should not overflow a chunk") +) var ( errChunkBoundsExceeded = errors.New("attempted access outside of chunk boundaries") @@ -50,10 +56,15 @@ type Chunk interface { Encoding() Encoding Utilization() float64 - // Slice returns a smaller chunk the includes all samples between start and end + // Slice returns a smaller chunk that includes all samples between start and end // (inclusive). Its may over estimate. On some encodings it is a noop. Slice(start, end model.Time) Chunk + // Rebound returns a smaller chunk that includes all samples between start and end (inclusive). + // We do not want to change existing Slice implementations because + // it is built specifically for query optimization and is a noop for some of the encodings. + Rebound(start, end model.Time) (Chunk, error) + // Len returns the number of samples in the chunk. Implementations may be // expensive. 
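The interface comments above distinguish Slice, which is cheap and may return more data than requested (it is a no-op for some encodings), from the new Rebound, which re-encodes the samples so the result covers exactly the requested [start, end] range. The following toy, self-contained illustration of that difference uses an invented Sample type and helpers rather than the real chunk encodings:

```go
package main

import "fmt"

// Sample is a toy stand-in for a timestamped value inside a chunk.
type Sample struct {
	TS    int64
	Value float64
}

// sliceNoop mimics encodings where Slice is a no-op: the original data is
// returned unchanged and callers must still filter by time when iterating.
func sliceNoop(samples []Sample, _, _ int64) []Sample {
	return samples
}

// rebound mimics Rebound: it builds a new, smaller series containing only
// the samples in [start, end] (inclusive).
func rebound(samples []Sample, start, end int64) []Sample {
	out := make([]Sample, 0, len(samples))
	for _, s := range samples {
		if s.TS >= start && s.TS <= end {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	samples := []Sample{{10, 1}, {20, 2}, {30, 3}, {40, 4}}
	fmt.Println(len(sliceNoop(samples, 20, 30))) // 4: caller still sees everything
	fmt.Println(len(rebound(samples, 20, 30)))   // 2: exactly the requested range
}
```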
Len() int @@ -246,3 +257,40 @@ func (it *indexAccessingChunkIterator) Batch(size int) Batch { func (it *indexAccessingChunkIterator) Err() error { return it.acc.err() } + +func reboundChunk(c Chunk, start, end model.Time) (Chunk, error) { + itr := c.NewIterator(nil) + if !itr.FindAtOrAfter(start) { + return nil, ErrSliceNoDataInRange + } + + pc, err := NewForEncoding(c.Encoding()) + if err != nil { + return nil, err + } + + for !itr.Value().Timestamp.After(end) { + oc, err := pc.Add(itr.Value()) + if err != nil { + return nil, err + } + + if oc != nil { + return nil, ErrSliceChunkOverflow + } + if !itr.Scan() { + break + } + } + + err = itr.Err() + if err != nil { + return nil, err + } + + if pc.Len() == 0 { + return nil, ErrSliceNoDataInRange + } + + return pc, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go index 683ce844eef6a..e0e43e7d63bdd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go @@ -233,6 +233,10 @@ func (c *doubleDeltaEncodedChunk) Slice(_, _ model.Time) Chunk { return c } +func (c *doubleDeltaEncodedChunk) Rebound(start, end model.Time) (Chunk, error) { + return reboundChunk(c, start, end) +} + // Marshal implements chunk. func (c doubleDeltaEncodedChunk) Marshal(w io.Writer) error { if len(c) > math.MaxUint16 { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go index a9d1c2f28771d..fe67337ecadff 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go @@ -287,6 +287,10 @@ func (c *varbitChunk) Slice(_, _ model.Time) Chunk { return c } +func (c *varbitChunk) Rebound(start, end model.Time) (Chunk, error) { + return reboundChunk(c, start, end) +} + // Marshal implements chunk. 
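ErrSliceNoDataInRange and ErrSliceChunkOverflow are declared as constants via errs.Error, a string-backed error type; plain errors.New sentinels cannot be constants. A minimal sketch of that pattern, with the type defined locally instead of importing weaveworks/common/errors, and with illustrative constant names:

```go
package main

import (
	"errors"
	"fmt"
)

// Error is a string-based error type. Because the underlying type is a
// string, values can be declared as constants.
type Error string

func (e Error) Error() string { return string(e) }

const (
	ErrNoDataInRange = Error("chunk has no data for given range to slice")
	ErrOverflow      = Error("slicing should not overflow a chunk")
)

func rebound(ok bool) error {
	if !ok {
		return ErrNoDataInRange
	}
	return nil
}

func main() {
	err := rebound(false)
	fmt.Println(err == ErrNoDataInRange)          // true: constants compare by value
	fmt.Println(errors.Is(err, ErrNoDataInRange)) // also true
}
```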
func (c varbitChunk) Marshal(w io.Writer) error { size := c.Size() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go index 7c9998f657f62..5e7b635be71b9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/grpc/grpc_client.go @@ -22,7 +22,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func connectToGrpcServer(serverAddress string) (GrpcStoreClient, *grpc.ClientConn, error) { params := keepalive.ClientParameters{ Time: time.Second * 20, - Timeout: time.Minute * 10, + Timeout: time.Second * 10, PermitWithoutStream: true, } param := grpc.WithKeepaliveParams(params) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go new file mode 100644 index 0000000000000..543865739dc66 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go @@ -0,0 +1,121 @@ +package purger + +import ( + "context" + "net/http" + "strings" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/bucket" + cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util" +) + +type BlocksPurgerAPI struct { + bucketClient objstore.Bucket + logger log.Logger +} + +func NewBlocksPurgerAPI(storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (*BlocksPurgerAPI, error) { + bucketClient, err := createBucketClient(storageCfg, logger, reg) + if err != nil { + return nil, err + } + + return newBlocksPurgerAPI(bucketClient, logger), nil +} + +func newBlocksPurgerAPI(bkt objstore.Bucket, logger log.Logger) *BlocksPurgerAPI { + return &BlocksPurgerAPI{bucketClient: bkt, logger: logger} +} + +func (api *BlocksPurgerAPI) DeleteTenant(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userID, err := tenant.TenantID(ctx) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + level.Info(api.logger).Log("msg", "tenant deletion marker created", "user", userID) + + w.WriteHeader(http.StatusOK) +} + +type DeleteTenantStatusResponse struct { + TenantID string `json:"tenant_id"` + BlocksDeleted bool `json:"blocks_deleted"` + RuleGroupsDeleted bool `json:"rule_groups_deleted"` + AlertManagerConfigDeleted bool `json:"alert_manager_config_deleted"` +} + +func (api *BlocksPurgerAPI) DeleteTenantStatus(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userID, err := tenant.TenantID(ctx) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + result := DeleteTenantStatusResponse{} + result.TenantID = userID + result.BlocksDeleted, err = api.checkBlocksForUser(ctx, userID) + + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + util.WriteJSONResponse(w, result) +} + +func (api *BlocksPurgerAPI) checkBlocksForUser(ctx context.Context, userID string) (bool, error) { + var 
errBlockFound = errors.New("block found") + + userBucket := bucket.NewUserBucketClient(userID, api.bucketClient) + err := userBucket.Iter(ctx, "", func(s string) error { + s = strings.TrimSuffix(s, "/") + + _, err := ulid.Parse(s) + if err != nil { + // not block, keep looking + return nil + } + + // Used as shortcut to stop iteration. + return errBlockFound + }) + + if errors.Is(err, errBlockFound) { + return false, nil + } + + if err != nil { + return false, err + } + + // No blocks found, all good. + return true, nil +} + +func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { + bucketClient, err := bucket.NewClient(context.Background(), cfg.Bucket, "purger", logger, reg) + if err != nil { + return nil, errors.Wrap(err, "create bucket client") + } + + return bucketClient, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go index 4f8d1025f37d9..2818a061f9855 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/delete_requests_store.go @@ -142,21 +142,29 @@ func (ds *DeleteStore) addDeleteRequest(ctx context.Context, userID string, crea // GetDeleteRequestsByStatus returns all delete requests for given status. func (ds *DeleteStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, HashValue: string(deleteRequestID), ValueEqual: []byte(status)}}) + return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ + TableName: ds.cfg.RequestsTableName, + HashValue: string(deleteRequestID), + ValueEqual: []byte(status), + }) } // GetDeleteRequestsForUserByStatus returns all delete requests for a user with given status. func (ds *DeleteStore) GetDeleteRequestsForUserByStatus(ctx context.Context, userID string, status DeleteRequestStatus) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, HashValue: string(deleteRequestID), RangeValuePrefix: []byte(userID), ValueEqual: []byte(status)}, + return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ + TableName: ds.cfg.RequestsTableName, + HashValue: string(deleteRequestID), + RangeValuePrefix: []byte(userID), + ValueEqual: []byte(status), }) } // GetAllDeleteRequestsForUser returns all delete requests for a user. 
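checkBlocksForUser above uses a sentinel error (errBlockFound) purely as a signal to stop bucket iteration early, and then interprets that error as a successful "found" result rather than a failure. Below is a self-contained sketch of the same pattern; iterPrefixes and hasBlockDir are illustrative stand-ins for the objstore Iter contract, not real APIs:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// errFound is a sentinel used purely as a short-circuit signal, not a failure.
var errFound = errors.New("entry found")

// iterPrefixes stands in for an object-store Iter: it calls fn for every key
// and stops as soon as fn returns a non-nil error, propagating that error.
func iterPrefixes(keys []string, fn func(string) error) error {
	for _, k := range keys {
		if err := fn(k); err != nil {
			return err
		}
	}
	return nil
}

// hasBlockDir reports whether any key looks like a block directory
// (here approximated as a key ending in "/").
func hasBlockDir(keys []string) (bool, error) {
	err := iterPrefixes(keys, func(k string) error {
		if strings.HasSuffix(k, "/") {
			return errFound // stop iterating early
		}
		return nil
	})
	if errors.Is(err, errFound) {
		return true, nil // the sentinel means "found", not "failed"
	}
	return false, err
}

func main() {
	found, err := hasBlockDir([]string{"markers/tenant-deletion-mark.json", "01ABCDEF/"})
	fmt.Println(found, err) // true <nil>
}
```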
func (ds *DeleteStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { - return ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, HashValue: string(deleteRequestID), RangeValuePrefix: []byte(userID)}, + return ds.queryDeleteRequests(ctx, chunk.IndexQuery{ + TableName: ds.cfg.RequestsTableName, + HashValue: string(deleteRequestID), + RangeValuePrefix: []byte(userID), }) } @@ -180,8 +188,10 @@ func (ds *DeleteStore) UpdateStatus(ctx context.Context, userID, requestID strin func (ds *DeleteStore) GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) { userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) - deleteRequests, err := ds.queryDeleteRequests(ctx, []chunk.IndexQuery{ - {TableName: ds.cfg.RequestsTableName, HashValue: string(deleteRequestID), RangeValuePrefix: []byte(userIDAndRequestID)}, + deleteRequests, err := ds.queryDeleteRequests(ctx, chunk.IndexQuery{ + TableName: ds.cfg.RequestsTableName, + HashValue: string(deleteRequestID), + RangeValuePrefix: []byte(userIDAndRequestID), }) if err != nil { @@ -210,9 +220,10 @@ func (ds *DeleteStore) GetPendingDeleteRequestsForUser(ctx context.Context, user return pendingDeleteRequests, nil } -func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery []chunk.IndexQuery) ([]DeleteRequest, error) { +func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery chunk.IndexQuery) ([]DeleteRequest, error) { deleteRequests := []DeleteRequest{} - err := ds.indexClient.QueryPages(ctx, deleteQuery, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { + // No need to lock inside the callback since we run a single index query. + err := ds.indexClient.QueryPages(ctx, []chunk.IndexQuery{deleteQuery}, func(query chunk.IndexQuery, batch chunk.ReadBatch) (shouldContinue bool) { itr := batch.Iterator() for itr.Next() { userID, requestID := splitUserIDAndRequestID(string(itr.RangeValue())) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go index 8f419e1f59f87..5e7131a587d4d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go @@ -85,7 +85,7 @@ type deleteRequestWithLogger struct { logger log.Logger // logger is initialized with userID and requestID to add context to every log generated using this } -// Config holds config for Purger +// Config holds config for chunks Purger type Config struct { Enable bool `yaml:"enable"` NumWorkers int `yaml:"num_workers"` @@ -108,7 +108,7 @@ type workerJob struct { logger log.Logger } -// Purger does the purging of data which is requested to be deleted +// Purger does the purging of data which is requested to be deleted. Purger only works for chunks. 
type Purger struct { services.Service diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go index b3eb5be4014fa..0799716afad54 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go @@ -12,8 +12,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql/parser" - "github.com/weaveworks/common/user" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" ) @@ -54,7 +54,7 @@ func NewDeleteRequestHandler(deleteStore *DeleteStore, deleteRequestCancelPeriod // AddDeleteRequestHandler handles addition of new delete request func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -119,7 +119,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r // GetAllDeleteRequestsHandler handles get all delete requests func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -141,7 +141,7 @@ func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWrite // CancelDeleteRequestHandler handles delete request cancellation func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go index ed13060615ea1..2bb8eecb0544e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go @@ -8,6 +8,7 @@ import ( "encoding/json" "strconv" "strings" + "sync" "fmt" @@ -82,8 +83,8 @@ func rangeValuePrefix(ss ...[]byte) []byte { return buildRangeValue(0, ss...) } -func decodeRangeKey(value []byte) [][]byte { - components := make([][]byte, 0, 5) +func decodeRangeKey(value []byte, components [][]byte) [][]byte { + components = components[:0] i, j := 0, 0 for j < len(value) { if value[j] != 0 { @@ -134,7 +135,10 @@ func encodeTime(t uint32) []byte { // range values. Currently checks range value key and returns the value as the // metric name. func parseMetricNameRangeValue(rangeValue []byte, value []byte) (model.LabelValue, error) { - components := decodeRangeKey(rangeValue) + componentRef := componentsPool.Get().(*componentRef) + defer componentsPool.Put(componentRef) + components := decodeRangeKey(rangeValue, componentRef.components) + switch { case len(components) < 4: return "", fmt.Errorf("invalid metric name range value: %x", rangeValue) @@ -151,7 +155,10 @@ func parseMetricNameRangeValue(rangeValue []byte, value []byte) (model.LabelValu // parseSeriesRangeValue returns the model.Metric stored in metric fingerprint // range values. 
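decodeRangeKey now appends into a caller-supplied slice, and the parse helpers recycle that slice through a sync.Pool (the componentRef and componentsPool definitions follow below), so steady-state parsing avoids allocating a fresh [][]byte per call. A small sketch of the pattern under invented names (splitRef, splitNull, parse):

```go
package main

import (
	"fmt"
	"sync"
)

// splitRef wraps a reusable backing slice so it can round-trip through a Pool.
type splitRef struct {
	parts [][]byte
}

var splitPool = sync.Pool{
	New: func() interface{} {
		return &splitRef{parts: make([][]byte, 0, 5)}
	},
}

// splitNull splits value on NUL bytes, appending into the provided slice so
// its backing array is reused instead of allocating a new one each call.
func splitNull(value []byte, parts [][]byte) [][]byte {
	parts = parts[:0]
	start := 0
	for i, b := range value {
		if b == 0 {
			parts = append(parts, value[start:i])
			start = i + 1
		}
	}
	return append(parts, value[start:])
}

func parse(value []byte) int {
	ref := splitPool.Get().(*splitRef)
	defer splitPool.Put(ref)

	parts := splitNull(value, ref.parts)
	return len(parts) // use parts only before returning; don't retain them
}

func main() {
	fmt.Println(parse([]byte("a\x00b\x00c"))) // 3
}
```

The pooled object is only a capacity cache: if a value has more components than the backing array holds, append simply grows a temporary slice and the pool entry keeps its original capacity.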
func parseSeriesRangeValue(rangeValue []byte, value []byte) (model.Metric, error) { - components := decodeRangeKey(rangeValue) + componentRef := componentsPool.Get().(*componentRef) + defer componentsPool.Put(componentRef) + components := decodeRangeKey(rangeValue, componentRef.components) + switch { case len(components) < 4: return nil, fmt.Errorf("invalid metric range value: %x", rangeValue) @@ -169,12 +176,24 @@ func parseSeriesRangeValue(rangeValue []byte, value []byte) (model.Metric, error } } +type componentRef struct { + components [][]byte +} + +var componentsPool = sync.Pool{ + New: func() interface{} { + return &componentRef{components: make([][]byte, 0, 5)} + }, +} + // parseChunkTimeRangeValue returns the chunkID and labelValue for chunk time // range values. func parseChunkTimeRangeValue(rangeValue []byte, value []byte) ( - chunkID string, labelValue model.LabelValue, isSeriesID bool, err error, + chunkID string, labelValue model.LabelValue, err error, ) { - components := decodeRangeKey(rangeValue) + componentRef := componentsPool.Get().(*componentRef) + defer componentsPool.Put(componentRef) + components := decodeRangeKey(rangeValue, componentRef.components) switch { case len(components) < 3: @@ -225,13 +244,11 @@ func parseChunkTimeRangeValue(rangeValue []byte, value []byte) ( // v9 schema actually return series IDs case seriesRangeKeyV1: chunkID = string(components[0]) - isSeriesID = true return case labelSeriesRangeKeyV1: chunkID = string(components[1]) labelValue = model.LabelValue(value) - isSeriesID = true return } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go index 6408a5c5eab08..add31d03cdc1c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go @@ -10,11 +10,11 @@ import ( "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -72,7 +72,7 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind // We cache the entire row, so filter client side. 
callback = chunk_util.QueryFilter(callback) - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 704ca5f29ef0f..6097b434b1033 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -117,6 +117,9 @@ func (cfg *Config) Validate() error { if err := cfg.AzureStorageConfig.Validate(); err != nil { return errors.Wrap(err, "invalid Azure Storage config") } + if err := cfg.AWSStorageConfig.Validate(); err != nil { + return errors.Wrap(err, "invalid AWS Storage config") + } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index 7426f19f3213d..eda8a83f753fd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -14,7 +14,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" - tsdberrors "github.com/prometheus/prometheus/tsdb/errors" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/mtime" @@ -141,7 +141,7 @@ func (cfg *TableManagerConfig) Validate() error { func (cfg *TableManagerConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ThroughputUpdatesDisabled, "table-manager.throughput-updates-disabled", false, "If true, disable all changes to DB capacity") f.BoolVar(&cfg.RetentionDeletesEnabled, "table-manager.retention-deletes-enabled", false, "If true, enables retention deletes of DB tables") - f.Var(&cfg.RetentionPeriodModel, "table-manager.retention-period", "Tables older than this retention period are deleted. Note: This setting is destructive to data!(default: 0, which disables deletion)") + f.Var(&cfg.RetentionPeriodModel, "table-manager.retention-period", "Tables older than this retention period are deleted. Must be either 0 (disabled) or a multiple of 24h. 
When enabled, be aware this setting is destructive to data!") f.DurationVar(&cfg.PollInterval, "table-manager.poll-interval", 2*time.Minute, "How frequently to poll backend to learn our capacity.") f.DurationVar(&cfg.CreationGracePeriod, "table-manager.periodic-table.grace-period", 10*time.Minute, "Periodic tables grace period (duration which table will be created/deleted before/after it's needed).") @@ -470,7 +470,7 @@ func (m *TableManager) partitionTables(ctx context.Context, descriptions []Table func (m *TableManager) createTables(ctx context.Context, descriptions []TableDesc) error { numFailures := 0 - merr := tsdberrors.MultiError{} + merr := tsdb_errors.NewMulti() for _, desc := range descriptions { level.Info(util.Logger).Log("msg", "creating table", "table", desc.Name) @@ -487,7 +487,7 @@ func (m *TableManager) createTables(ctx context.Context, descriptions []TableDes func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDesc) error { numFailures := 0 - merr := tsdberrors.MultiError{} + merr := tsdb_errors.NewMulti() for _, desc := range descriptions { level.Info(util.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name) diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go index 411f66d336e62..c6ddf87402e61 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go @@ -11,14 +11,15 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/objstore" + "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -27,6 +28,7 @@ type BlocksCleanerConfig struct { MetaSyncConcurrency int DeletionDelay time.Duration CleanupInterval time.Duration + CleanupConcurrency int } type BlocksCleaner struct { @@ -35,7 +37,7 @@ type BlocksCleaner struct { cfg BlocksCleanerConfig logger log.Logger bucketClient objstore.Bucket - usersScanner *UsersScanner + usersScanner *cortex_tsdb.UsersScanner // Metrics. 
runsStarted prometheus.Counter @@ -46,7 +48,7 @@ type BlocksCleaner struct { blocksFailedTotal prometheus.Counter } -func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *UsersScanner, logger log.Logger, reg prometheus.Registerer) *BlocksCleaner { +func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *cortex_tsdb.UsersScanner, logger log.Logger, reg prometheus.Registerer) *BlocksCleaner { c := &BlocksCleaner{ cfg: cfg, bucketClient: bucketClient, @@ -98,49 +100,91 @@ func (c *BlocksCleaner) ticker(ctx context.Context) error { } func (c *BlocksCleaner) runCleanup(ctx context.Context) { - level.Info(c.logger).Log("msg", "started hard deletion of blocks marked for deletion") + level.Info(c.logger).Log("msg", "started hard deletion of blocks marked for deletion, and blocks for tenants marked for deletion") c.runsStarted.Inc() if err := c.cleanUsers(ctx); err == nil { - level.Info(c.logger).Log("msg", "successfully completed hard deletion of blocks marked for deletion") + level.Info(c.logger).Log("msg", "successfully completed hard deletion of blocks marked for deletion, and blocks for tenants marked for deletion") c.runsCompleted.Inc() c.runsLastSuccess.SetToCurrentTime() } else if errors.Is(err, context.Canceled) { - level.Info(c.logger).Log("msg", "canceled hard deletion of blocks marked for deletion", "err", err) + level.Info(c.logger).Log("msg", "canceled hard deletion of blocks marked for deletion, and blocks for tenants marked for deletion", "err", err) return } else { - level.Error(c.logger).Log("msg", "failed to hard delete blocks marked for deletion", "err", err.Error()) + level.Error(c.logger).Log("msg", "failed to hard delete blocks marked for deletion, and blocks for tenants marked for deletion", "err", err.Error()) c.runsFailed.Inc() } } func (c *BlocksCleaner) cleanUsers(ctx context.Context) error { - users, err := c.usersScanner.ScanUsers(ctx) + users, deleted, err := c.usersScanner.ScanUsers(ctx) if err != nil { return errors.Wrap(err, "failed to discover users from bucket") } - errs := tsdb_errors.MultiError{} - for _, userID := range users { - // Ensure the context has not been canceled (ie. shutdown has been triggered). - if ctx.Err() != nil { - return ctx.Err() + isDeleted := map[string]bool{} + for _, userID := range deleted { + isDeleted[userID] = true + } + + allUsers := append(users, deleted...) + return concurrency.ForEachUser(ctx, allUsers, c.cfg.CleanupConcurrency, func(ctx context.Context, userID string) error { + if isDeleted[userID] { + return errors.Wrapf(c.deleteUser(ctx, userID), "failed to delete blocks for user marked for deletion: %s", userID) } + return errors.Wrapf(c.cleanUser(ctx, userID), "failed to delete blocks for user: %s", userID) + }) +} - if err = c.cleanUser(ctx, userID); err != nil { - errs.Add(errors.Wrapf(err, "failed to delete user blocks (user: %s)", userID)) - continue +// Remove all blocks for user marked for deletion. 
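cleanUsers now fans out per tenant through concurrency.ForEachUser, bounded by the new CleanupConcurrency setting. The sketch below shows roughly what such a bounded fan-out helper can look like using only the standard library; the real helper in Cortex's concurrency package may differ in error aggregation and context handling:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// forEachUser runs fn for every user with at most maxConcurrent invocations
// in flight at once, and returns the errors (if any) from the failed users.
func forEachUser(ctx context.Context, users []string, maxConcurrent int, fn func(ctx context.Context, user string) error) []error {
	var (
		wg   sync.WaitGroup
		mtx  sync.Mutex
		errs []error
		sem  = make(chan struct{}, maxConcurrent)
	)

	for _, user := range users {
		user := user
		wg.Add(1)
		sem <- struct{}{} // acquire a slot; blocks when the limit is reached

		go func() {
			defer wg.Done()
			defer func() { <-sem }() // release the slot

			if err := fn(ctx, user); err != nil {
				mtx.Lock()
				errs = append(errs, fmt.Errorf("user %s: %w", user, err))
				mtx.Unlock()
			}
		}()
	}

	wg.Wait()
	return errs
}

func main() {
	users := []string{"tenant-a", "tenant-b", "tenant-c", "tenant-d"}
	errs := forEachUser(context.Background(), users, 2, func(_ context.Context, user string) error {
		time.Sleep(10 * time.Millisecond) // pretend to clean up this tenant's blocks
		fmt.Println("cleaned", user)
		return nil
	})
	fmt.Println("failed users:", len(errs))
}
```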
+func (c *BlocksCleaner) deleteUser(ctx context.Context, userID string) error { + userLogger := util.WithUserID(userID, c.logger) + userBucket := bucket.NewUserBucketClient(userID, c.bucketClient) + + level.Info(userLogger).Log("msg", "deleting blocks for user marked for deletion") + + var deleted, failed int + err := userBucket.Iter(ctx, "", func(name string) error { + if err := ctx.Err(); err != nil { + return err + } + + id, ok := block.IsBlockDir(name) + if !ok { + return nil + } + + err := block.Delete(ctx, userLogger, userBucket, id) + if err != nil { + failed++ + c.blocksFailedTotal.Inc() + level.Warn(userLogger).Log("msg", "failed to delete block", "block", id, "err", err) + return nil // Continue with other blocks. } + + deleted++ + c.blocksCleanedTotal.Inc() + level.Info(userLogger).Log("msg", "deleted block", "block", id) + return nil + }) + + if err != nil { + return err } - return errs.Err() + if failed > 0 { + return errors.Errorf("failed to delete %d blocks", failed) + } + + level.Info(userLogger).Log("msg", "finished deleting blocks for user marked for deletion", "deletedBlocks", deleted) + return nil } func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string) error { userLogger := util.WithUserID(userID, c.logger) - userBucket := cortex_tsdb.NewUserBucketClient(userID, c.bucketClient) + userBucket := bucket.NewUserBucketClient(userID, c.bucketClient) - ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, c.cfg.DeletionDelay) + ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, c.cfg.DeletionDelay, c.cfg.MetaSyncConcurrency) fetcher, err := block.NewMetaFetcher( userLogger, @@ -188,7 +232,7 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string) error { return nil } -func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map[ulid.ULID]error, userBucket *cortex_tsdb.UserBucketClient, userLogger log.Logger) { +func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map[ulid.ULID]error, userBucket *bucket.UserBucketClient, userLogger log.Logger) { for blockID, blockErr := range partials { // We can safely delete only blocks which are partial because the meta.json is missing. if blockErr != block.ErrorSyncMetaNotFound { @@ -196,8 +240,8 @@ func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map } // We can safely delete only partial blocks with a deletion mark. 
- _, err := metadata.ReadDeletionMark(ctx, userBucket, userLogger, blockID.String()) - if err == metadata.ErrorDeletionMarkNotFound { + err := metadata.ReadMarker(ctx, userLogger, userBucket, blockID.String(), &metadata.DeletionMark{}) + if err == metadata.ErrorMarkerNotFound { continue } if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go index f6f08dcf3fbb4..d52b776a839aa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "hash/fnv" + "math/rand" "path" "strings" "time" @@ -22,11 +23,18 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" ) +var ( + errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s" +) + // Config holds the Compactor config. type Config struct { BlockRanges cortex_tsdb.DurationList `yaml:"block_ranges"` @@ -37,8 +45,12 @@ type Config struct { CompactionInterval time.Duration `yaml:"compaction_interval"` CompactionRetries int `yaml:"compaction_retries"` CompactionConcurrency int `yaml:"compaction_concurrency"` + CleanupConcurrency int `yaml:"cleanup_concurrency"` DeletionDelay time.Duration `yaml:"deletion_delay"` + EnabledTenants flagext.StringSliceCSV `yaml:"enabled_tenants"` + DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` + // Compactors sharding. ShardingEnabled bool `yaml:"sharding_enabled"` ShardingRing RingConfig `yaml:"sharding_ring"` @@ -66,11 +78,26 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.CompactionInterval, "compactor.compaction-interval", time.Hour, "The frequency at which the compaction runs") f.IntVar(&cfg.CompactionRetries, "compactor.compaction-retries", 3, "How many times to retry a failed compaction during a single compaction interval") f.IntVar(&cfg.CompactionConcurrency, "compactor.compaction-concurrency", 1, "Max number of concurrent compactions running.") + f.IntVar(&cfg.CleanupConcurrency, "compactor.cleanup-concurrency", 20, "Max number of tenants for which blocks should be cleaned up concurrently (deletion of blocks previously marked for deletion).") f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.") f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from bucket. "+ "If not 0, blocks will be marked for deletion and compactor component will delete blocks marked for deletion from the bucket. "+ "If delete-delay is 0, blocks will be deleted straight away. 
Note that deleting blocks immediately can cause query failures, "+ "if store gateway still has the block loaded, or compactor is ignoring the deletion because it's compacting the block at the same time.") + + f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.") + f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.") +} + +func (cfg *Config) Validate() error { + // Each block range period should be divisible by the previous one. + for i := 1; i < len(cfg.BlockRanges); i++ { + if cfg.BlockRanges[i]%cfg.BlockRanges[i-1] != 0 { + return errors.Errorf(errInvalidBlockRanges, cfg.BlockRanges[i].String(), cfg.BlockRanges[i-1].String()) + } + } + + return nil } // Compactor is a multi-tenant TSDB blocks compactor based on Thanos. @@ -83,18 +110,25 @@ type Compactor struct { parentLogger log.Logger registerer prometheus.Registerer - // Function that creates bucket client and TSDB compactor using the context. + // If empty, all users are enabled. If not empty, only users in the map are enabled (possibly owned by compactor, also subject to sharding configuration). + enabledUsers map[string]struct{} + + // If empty, no users are disabled. If not empty, users in the map are disabled (not owned by this compactor). + disabledUsers map[string]struct{} + + // Function that creates bucket client, TSDB planner and compactor using the context. // Useful for injecting mock objects from tests. - createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) + createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) // Users scanner, used to discover users from the bucket. - usersScanner *UsersScanner + usersScanner *cortex_tsdb.UsersScanner // Blocks cleaner is responsible to hard delete blocks marked for deletion. blocksCleaner *BlocksCleaner - // Underlying compactor used to compact TSDB blocks. + // Underlying compactor and planner used to compact TSDB blocks. tsdbCompactor tsdb.Compactor + tsdbPlanner compact.Planner // Client used to run operations on the bucket storing blocks. bucketClient objstore.Bucket @@ -106,12 +140,16 @@ type Compactor struct { ringSubservicesWatcher *services.FailureWatcher // Metrics. - compactionRunsStarted prometheus.Counter - compactionRunsCompleted prometheus.Counter - compactionRunsFailed prometheus.Counter - compactionRunsLastSuccess prometheus.Gauge - blocksMarkedForDeletion prometheus.Counter - garbageCollectedBlocks prometheus.Counter + compactionRunsStarted prometheus.Counter + compactionRunsCompleted prometheus.Counter + compactionRunsFailed prometheus.Counter + compactionRunsLastSuccess prometheus.Gauge + compactionRunDiscoveredTenants prometheus.Gauge + compactionRunSkippedTenants prometheus.Gauge + compactionRunSucceededTenants prometheus.Gauge + compactionRunFailedTenants prometheus.Gauge + blocksMarkedForDeletion prometheus.Counter + garbageCollectedBlocks prometheus.Counter // TSDB syncer metrics syncerMetrics *syncerMetrics @@ -119,17 +157,22 @@ type Compactor struct { // NewCompactor makes a new Compactor. 
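The new Config.Validate enforces that every compactor block range period is an exact multiple of the previous one. The check in isolation, as a runnable sketch with an illustrative function name:

```go
package main

import (
	"fmt"
	"time"
)

// validateBlockRanges checks that each compaction range is an exact multiple
// of the previous one, mirroring the rule enforced by the config validation.
func validateBlockRanges(ranges []time.Duration) error {
	for i := 1; i < len(ranges); i++ {
		if ranges[i]%ranges[i-1] != 0 {
			return fmt.Errorf("%s is not divisible by %s", ranges[i], ranges[i-1])
		}
	}
	return nil
}

func main() {
	fmt.Println(validateBlockRanges([]time.Duration{2 * time.Hour, 12 * time.Hour, 24 * time.Hour})) // <nil>
	fmt.Println(validateBlockRanges([]time.Duration{2 * time.Hour, 7 * time.Hour}))                  // error
}
```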
func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { - createBucketClientAndTsdbCompactor := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) { - bucketClient, err := cortex_tsdb.NewBucketClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) + createDependencies := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) { + bucketClient, err := bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) if err != nil { - return nil, nil, errors.Wrap(err, "failed to create the bucket client") + return nil, nil, nil, errors.Wrap(err, "failed to create the bucket client") } compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) - return bucketClient, compactor, err + if err != nil { + return nil, nil, nil, err + } + + planner := compact.NewTSDBBasedPlanner(logger, compactorCfg.BlockRanges.ToMilliseconds()) + return bucketClient, compactor, planner, nil } - cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createBucketClientAndTsdbCompactor) + cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createDependencies) if err != nil { return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") } @@ -142,16 +185,16 @@ func newCompactor( storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer, - createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error), + createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error), ) (*Compactor, error) { c := &Compactor{ - compactorCfg: compactorCfg, - storageCfg: storageCfg, - parentLogger: logger, - logger: log.With(logger, "component", "compactor"), - registerer: registerer, - syncerMetrics: newSyncerMetrics(registerer), - createBucketClientAndTsdbCompactor: createBucketClientAndTsdbCompactor, + compactorCfg: compactorCfg, + storageCfg: storageCfg, + parentLogger: logger, + logger: log.With(logger, "component", "compactor"), + registerer: registerer, + syncerMetrics: newSyncerMetrics(registerer), + createDependencies: createDependencies, compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_runs_started_total", @@ -169,6 +212,22 @@ func newCompactor( Name: "cortex_compactor_last_successful_run_timestamp_seconds", Help: "Unix timestamp of the last successful compaction run.", }), + compactionRunDiscoveredTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_compactor_tenants_discovered", + Help: "Number of tenants discovered during the current compaction run. Reset to 0 when compactor is idle.", + }), + compactionRunSkippedTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_compactor_tenants_skipped", + Help: "Number of tenants skipped during the current compaction run. Reset to 0 when compactor is idle.", + }), + compactionRunSucceededTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_compactor_tenants_processing_succeeded", + Help: "Number of tenants successfully processed during the current compaction run. 
Reset to 0 when compactor is idle.", + }), + compactionRunFailedTenants: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_compactor_tenants_processing_failed", + Help: "Number of tenants failed processing during the current compaction run. Reset to 0 when compactor is idle.", + }), blocksMarkedForDeletion: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_blocks_marked_for_deletion_total", Help: "Total number of blocks marked for deletion in compactor.", @@ -179,6 +238,24 @@ func newCompactor( }), } + if len(compactorCfg.EnabledTenants) > 0 { + c.enabledUsers = map[string]struct{}{} + for _, u := range compactorCfg.EnabledTenants { + c.enabledUsers[u] = struct{}{} + } + + level.Info(c.logger).Log("msg", "using enabled users", "enabled", strings.Join(compactorCfg.EnabledTenants, ", ")) + } + + if len(compactorCfg.DisabledTenants) > 0 { + c.disabledUsers = map[string]struct{}{} + for _, u := range compactorCfg.DisabledTenants { + c.disabledUsers[u] = struct{}{} + } + + level.Info(c.logger).Log("msg", "using disabled users", "disabled", strings.Join(compactorCfg.DisabledTenants, ", ")) + } + c.Service = services.NewBasicService(c.starting, c.running, c.stopping) return c, nil @@ -189,13 +266,16 @@ func (c *Compactor) starting(ctx context.Context) error { var err error // Create bucket client and compactor. - c.bucketClient, c.tsdbCompactor, err = c.createBucketClientAndTsdbCompactor(ctx) + c.bucketClient, c.tsdbCompactor, c.tsdbPlanner, err = c.createDependencies(ctx) if err != nil { return errors.Wrap(err, "failed to initialize compactor objects") } + // Wrap the bucket client to write block deletion marks in the global location too. + c.bucketClient = bucketindex.BucketWithGlobalMarkers(c.bucketClient) + // Create the users scanner. - c.usersScanner = NewUsersScanner(c.bucketClient, c.ownUser, c.parentLogger) + c.usersScanner = cortex_tsdb.NewUsersScanner(c.bucketClient, c.ownUser, c.parentLogger) // Initialize the compactors ring if sharding is enabled. if c.compactorCfg.ShardingEnabled { @@ -232,6 +312,22 @@ func (c *Compactor) starting(ctx context.Context) error { return err } level.Info(c.logger).Log("msg", "compactor is ACTIVE in the ring") + + // In the event of a cluster cold start or scale up of 2+ compactor instances at the same + // time, we may end up in a situation where each new compactor instance starts at a slightly + // different time and thus each one starts with on a different state of the ring. It's better + // to just wait the ring stability for a short time. + if c.compactorCfg.ShardingRing.WaitStabilityMinDuration > 0 { + minWaiting := c.compactorCfg.ShardingRing.WaitStabilityMinDuration + maxWaiting := c.compactorCfg.ShardingRing.WaitStabilityMaxDuration + + level.Info(c.logger).Log("msg", "waiting until compactor ring topology is stable", "min_waiting", minWaiting.String(), "max_waiting", maxWaiting.String()) + if err := ring.WaitRingStability(ctx, c.ring, ring.Compactor, minWaiting, maxWaiting); err != nil { + level.Warn(c.logger).Log("msg", "compactor is ring topology is not stable after the max waiting time, proceeding anyway") + } else { + level.Info(c.logger).Log("msg", "compactor is ring topology is stable") + } + } } // Create the blocks cleaner (service). 
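ownUser consults the enabledUsers and disabledUsers maps built from -compactor.enabled-tenants and -compactor.disabled-tenants. The isAllowedUser helper itself is not part of these hunks, so the following is only a plausible shape for such an allow/deny check, under a hypothetical name:

```go
package main

import "fmt"

// isAllowedTenant applies an allow list first (if non-empty, the tenant must
// be in it) and then a deny list (membership always excludes the tenant).
func isAllowedTenant(enabled, disabled map[string]struct{}, tenantID string) bool {
	if len(enabled) > 0 {
		if _, ok := enabled[tenantID]; !ok {
			return false
		}
	}
	if _, ok := disabled[tenantID]; ok {
		return false
	}
	return true
}

func main() {
	enabled := map[string]struct{}{"team-a": {}, "team-b": {}}
	disabled := map[string]struct{}{"team-b": {}}

	fmt.Println(isAllowedTenant(enabled, disabled, "team-a")) // true
	fmt.Println(isAllowedTenant(enabled, disabled, "team-b")) // false: explicitly disabled
	fmt.Println(isAllowedTenant(enabled, disabled, "team-c")) // false: not in the allow list
}
```

Checking the deny list after the allow list lets operators carve exceptions out of an otherwise enabled set.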
@@ -239,7 +335,8 @@ func (c *Compactor) starting(ctx context.Context) error { DataDir: c.compactorCfg.DataDir, MetaSyncConcurrency: c.compactorCfg.MetaSyncConcurrency, DeletionDelay: c.compactorCfg.DeletionDelay, - CleanupInterval: util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.05), + CleanupInterval: util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.1), + CleanupConcurrency: c.compactorCfg.CleanupConcurrency, }, c.bucketClient, c.usersScanner, c.parentLogger, c.registerer) // Ensure an initial cleanup occurred before starting the compactor. @@ -305,15 +402,32 @@ func (c *Compactor) compactUsersWithRetries(ctx context.Context) { } func (c *Compactor) compactUsers(ctx context.Context) error { + // Reset progress metrics once done. + defer func() { + c.compactionRunDiscoveredTenants.Set(0) + c.compactionRunSkippedTenants.Set(0) + c.compactionRunSucceededTenants.Set(0) + c.compactionRunFailedTenants.Set(0) + }() + level.Info(c.logger).Log("msg", "discovering users from bucket") users, err := c.discoverUsers(ctx) if err != nil { level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err) return errors.Wrap(err, "failed to discover users from bucket") } + level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(users)) + c.compactionRunDiscoveredTenants.Set(float64(len(users))) + + // When starting multiple compactor replicas nearly at the same time, running in a cluster with + // a large number of tenants, we may end up in a situation where the 1st user is compacted by + // multiple replicas at the same time. Shuffling users helps reduce the likelihood this will happen. + rand.Shuffle(len(users), func(i, j int) { + users[i], users[j] = users[j], users[i] + }) - errs := tsdb_errors.MultiError{} + errs := tsdb_errors.NewMulti() for _, userID := range users { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). @@ -322,25 +436,37 @@ func (c *Compactor) compactUsers(ctx context.Context) error { return ctx.Err() } - // If sharding is enabled, ensure the user ID belongs to our shard. - if c.compactorCfg.ShardingEnabled { - if owned, err := c.ownUser(userID); err != nil { - level.Warn(c.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err) - continue - } else if !owned { - level.Debug(c.logger).Log("msg", "skipping user because not owned by this shard", "user", userID) - continue - } + // Ensure the user ID belongs to our shard. 
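Shuffling the discovered tenants, as done above, makes it unlikely that several compactor replicas started at nearly the same time all begin with the same first tenant. A tiny illustrative sketch of the same idea (tenant IDs are made up):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	users := []string{"tenant-a", "tenant-b", "tenant-c", "tenant-d"}

	// Seed so that replicas started at slightly different times get
	// different orderings; otherwise every replica would iterate tenants
	// in the same bucket-listing order.
	rand.Seed(time.Now().UnixNano())

	rand.Shuffle(len(users), func(i, j int) {
		users[i], users[j] = users[j], users[i]
	})

	fmt.Println(users)
}
```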
+ if owned, err := c.ownUser(userID); err != nil { + c.compactionRunSkippedTenants.Inc() + level.Warn(c.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err) + continue + } else if !owned { + c.compactionRunSkippedTenants.Inc() + level.Debug(c.logger).Log("msg", "skipping user because it is not owned by this shard", "user", userID) + continue + } + + if markedForDeletion, err := cortex_tsdb.TenantDeletionMarkExists(ctx, c.bucketClient, userID); err != nil { + c.compactionRunSkippedTenants.Inc() + level.Warn(c.logger).Log("msg", "unable to check if user is marked for deletion", "user", userID, "err", err) + continue + } else if markedForDeletion { + c.compactionRunSkippedTenants.Inc() + level.Debug(c.logger).Log("msg", "skipping user because it is marked for deletion", "user", userID) + continue } level.Info(c.logger).Log("msg", "starting compaction of user blocks", "user", userID) if err = c.compactUser(ctx, userID); err != nil { + c.compactionRunFailedTenants.Inc() level.Error(c.logger).Log("msg", "failed to compact user blocks", "user", userID, "err", err) errs.Add(errors.Wrapf(err, "failed to compact user blocks (user: %s)", userID)) continue } + c.compactionRunSucceededTenants.Inc() level.Info(c.logger).Log("msg", "successfully compacted user blocks", "user", userID) } @@ -348,7 +474,7 @@ func (c *Compactor) compactUsers(ctx context.Context) error { } func (c *Compactor) compactUser(ctx context.Context, userID string) error { - bucket := cortex_tsdb.NewUserBucketClient(userID, c.bucketClient) + bucket := bucket.NewUserBucketClient(userID, c.bucketClient) reg := prometheus.NewRegistry() defer c.syncerMetrics.gatherThanosSyncerMetrics(reg) @@ -361,7 +487,11 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { // While fetching blocks, we filter out blocks that were marked for deletion by using IgnoreDeletionMarkFilter. // The delay of deleteDelay/2 is added to ensure we fetch blocks that are meant to be deleted but do not have a replacement yet. - ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(ulogger, bucket, time.Duration(c.compactorCfg.DeletionDelay.Seconds()/2)*time.Second) + ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter( + ulogger, + bucket, + time.Duration(c.compactorCfg.DeletionDelay.Seconds()/2)*time.Second, + c.compactorCfg.MetaSyncConcurrency) fetcher, err := block.NewMetaFetcher( ulogger, @@ -416,6 +546,7 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { ulogger, syncer, grouper, + c.tsdbPlanner, c.tsdbCompactor, path.Join(c.compactorCfg.DataDir, "compact"), bucket, @@ -444,6 +575,10 @@ func (c *Compactor) discoverUsers(ctx context.Context) ([]string, error) { } func (c *Compactor) ownUser(userID string) (bool, error) { + if !isAllowedUser(c.enabledUsers, c.disabledUsers, userID) { + return false, nil + } + // Always owned if sharding is disabled. if !c.compactorCfg.ShardingEnabled { return true, nil @@ -455,7 +590,7 @@ func (c *Compactor) ownUser(userID string) (bool, error) { userHash := hasher.Sum32() // Check whether this compactor instance owns the user. 
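`ownUser` above hashes the tenant ID with FNV-1a and asks the ring which instance owns the resulting token. A simplified stand-in for that ownership check, replacing the ring with a fixed instance list (purely illustrative; the real ring assigns token ranges rather than taking a modulo):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// ownsUser returns true when this instance is responsible for the tenant.
// The real compactor looks the 32-bit token up in the ring; here we simply
// take the token modulo the number of instances.
func ownsUser(userID, myAddr string, instances []string) bool {
	h := fnv.New32a()
	_, _ = h.Write([]byte(userID))
	token := h.Sum32()

	owner := instances[token%uint32(len(instances))]
	return owner == myAddr
}

func main() {
	instances := []string{"10.0.0.1:9095", "10.0.0.2:9095", "10.0.0.3:9095"}
	for _, u := range []string{"tenant-a", "tenant-b", "tenant-c"} {
		fmt.Println(u, "owned by me:", ownsUser(u, "10.0.0.1:9095", instances))
	}
}
```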
- rs, err := c.ring.Get(userHash, ring.Read, []ring.IngesterDesc{}) + rs, err := c.ring.Get(userHash, ring.Compactor, []ring.IngesterDesc{}) if err != nil { return false, err } @@ -466,3 +601,19 @@ func (c *Compactor) ownUser(userID string) (bool, error) { return rs.Ingesters[0].Addr == c.ringLifecycler.Addr, nil } + +func isAllowedUser(enabledUsers, disabledUsers map[string]struct{}, userID string) bool { + if len(enabledUsers) > 0 { + if _, ok := enabledUsers[userID]; !ok { + return false + } + } + + if len(disabledUsers) > 0 { + if _, ok := disabledUsers[userID]; ok { + return false + } + } + + return true +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go index 77f8bde3d5303..7bfc930da210f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go @@ -22,6 +22,10 @@ type RingConfig struct { HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + // Wait ring stability. + WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` + WaitStabilityMaxDuration time.Duration `yaml:"wait_stability_max_duration"` + // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` InstanceInterfaceNames []string `yaml:"instance_interface_names"` @@ -45,6 +49,10 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring.") + // Wait stability flags. + f.DurationVar(&cfg.WaitStabilityMinDuration, "compactor.ring.wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.") + f.DurationVar(&cfg.WaitStabilityMaxDuration, "compactor.ring.wait-stability-max-duration", 5*time.Minute, "Maximum time to wait for ring stability at startup. 
If the compactor ring keep changing after this period of time, the compactor will start anyway.") + // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "compactor.ring.instance-interface-names", "Name of network interface to read address from.") @@ -76,7 +84,7 @@ func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { lc.Port = cfg.InstancePort lc.ID = cfg.InstanceID lc.InfNames = cfg.InstanceInterfaceNames - lc.SkipUnregister = false + lc.UnregisterOnShutdown = true lc.HeartbeatPeriod = cfg.HeartbeatPeriod lc.ObservePeriod = 0 lc.JoinAfter = 0 diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/users_scanner.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/users_scanner.go deleted file mode 100644 index d9bf752afd62c..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/users_scanner.go +++ /dev/null @@ -1,47 +0,0 @@ -package compactor - -import ( - "context" - "strings" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/thanos-io/thanos/pkg/objstore" -) - -type UsersScanner struct { - bucketClient objstore.Bucket - logger log.Logger - isOwned func(userID string) (bool, error) -} - -func NewUsersScanner(bucketClient objstore.Bucket, isOwned func(userID string) (bool, error), logger log.Logger) *UsersScanner { - return &UsersScanner{ - bucketClient: bucketClient, - logger: logger, - isOwned: isOwned, - } -} - -// ScanUsers returns a fresh list of users found in the storage. If sharding is enabled, -// the returned list contains only the users owned by this instance. -func (s *UsersScanner) ScanUsers(ctx context.Context) ([]string, error) { - var users []string - - err := s.bucketClient.Iter(ctx, "", func(entry string) error { - userID := strings.TrimSuffix(entry, "/") - - // Check if it's owned by this instance. - owned, err := s.isOwned(userID) - if err != nil { - level.Warn(s.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err) - } else if !owned { - return nil - } - - users = append(users, userID) - return nil - }) - - return users, err -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go index c52b18c070d1e..7ee4a75360e1f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go @@ -19,10 +19,10 @@ import ( "github.com/gorilla/mux" amconfig "github.com/prometheus/alertmanager/config" amtemplate "github.com/prometheus/alertmanager/template" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/configs/userconfig" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" ) @@ -108,7 +108,7 @@ func (a *API) RegisterRoutes(r *mux.Router) { // getConfig returns the request configuration. 
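The new wait-stability settings follow the usual Cortex convention of a YAML field plus a registered CLI flag with a default. A minimal sketch of that struct-and-RegisterFlags pattern, using invented `example.ring.*` flag names:

```go
package main

import (
	"flag"
	"fmt"
	"time"
)

// ExampleRingConfig mirrors the pattern used above: each field carries a
// yaml tag and is also registered as a CLI flag with a default value.
type ExampleRingConfig struct {
	WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"`
	WaitStabilityMaxDuration time.Duration `yaml:"wait_stability_max_duration"`
}

func (c *ExampleRingConfig) RegisterFlags(f *flag.FlagSet) {
	f.DurationVar(&c.WaitStabilityMinDuration, "example.ring.wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.")
	f.DurationVar(&c.WaitStabilityMaxDuration, "example.ring.wait-stability-max-duration", 5*time.Minute, "Maximum time to wait for ring stability at startup.")
}

func main() {
	var cfg ExampleRingConfig
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	cfg.RegisterFlags(fs)
	_ = fs.Parse([]string{"-example.ring.wait-stability-min-duration=30s"})
	fmt.Printf("%+v\n", cfg)
}
```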
func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -146,7 +146,7 @@ func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { } func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -296,7 +296,7 @@ func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) { } func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -318,7 +318,7 @@ func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { } func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go index 336232f26cf00..d66b581e19761 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go @@ -12,6 +12,9 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/promql" + prom_storage "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/server" "github.com/weaveworks/common/signals" "google.golang.org/grpc/health/grpc_health_v1" @@ -20,7 +23,6 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager" "github.com/cortexproject/cortex/pkg/api" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/chunk/purger" "github.com/cortexproject/cortex/pkg/chunk/storage" @@ -31,15 +33,18 @@ import ( "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/flusher" + "github.com/cortexproject/cortex/pkg/frontend" + frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/querier/queryrange" + querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/scheduler" "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" @@ -47,6 +52,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpc/healthcheck" 
"github.com/cortexproject/cortex/pkg/util/modules" + "github.com/cortexproject/cortex/pkg/util/process" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" @@ -76,33 +82,34 @@ type Config struct { PrintConfig bool `yaml:"-"` HTTPPrefix string `yaml:"http_prefix"` - API api.Config `yaml:"api"` - Server server.Config `yaml:"server"` - Distributor distributor.Config `yaml:"distributor"` - Querier querier.Config `yaml:"querier"` - IngesterClient client.Config `yaml:"ingester_client"` - Ingester ingester.Config `yaml:"ingester"` - Flusher flusher.Config `yaml:"flusher"` - Storage storage.Config `yaml:"storage"` - ChunkStore chunk.StoreConfig `yaml:"chunk_store"` - Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) - LimitsConfig validation.Limits `yaml:"limits"` - Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` - Worker frontend.WorkerConfig `yaml:"frontend_worker"` - Frontend frontend.Config `yaml:"frontend"` - QueryRange queryrange.Config `yaml:"query_range"` - TableManager chunk.TableManagerConfig `yaml:"table_manager"` - Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. - BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` - Compactor compactor.Config `yaml:"compactor"` - StoreGateway storegateway.Config `yaml:"store_gateway"` - PurgerConfig purger.Config `yaml:"purger"` - - Ruler ruler.Config `yaml:"ruler"` - Configs configs.Config `yaml:"configs"` - Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` - RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` - MemberlistKV memberlist.KVConfig `yaml:"memberlist"` + API api.Config `yaml:"api"` + Server server.Config `yaml:"server"` + Distributor distributor.Config `yaml:"distributor"` + Querier querier.Config `yaml:"querier"` + IngesterClient client.Config `yaml:"ingester_client"` + Ingester ingester.Config `yaml:"ingester"` + Flusher flusher.Config `yaml:"flusher"` + Storage storage.Config `yaml:"storage"` + ChunkStore chunk.StoreConfig `yaml:"chunk_store"` + Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) + LimitsConfig validation.Limits `yaml:"limits"` + Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` + Worker querier_worker.Config `yaml:"frontend_worker"` + Frontend frontend.CombinedFrontendConfig `yaml:"frontend"` + QueryRange queryrange.Config `yaml:"query_range"` + TableManager chunk.TableManagerConfig `yaml:"table_manager"` + Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. + BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` + Compactor compactor.Config `yaml:"compactor"` + StoreGateway storegateway.Config `yaml:"store_gateway"` + PurgerConfig purger.Config `yaml:"purger"` + + Ruler ruler.Config `yaml:"ruler"` + Configs configs.Config `yaml:"configs"` + Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` + RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` + MemberlistKV memberlist.KVConfig `yaml:"memberlist"` + QueryScheduler scheduler.Config `yaml:"query_scheduler"` } // RegisterFlags registers flag. 
@@ -148,6 +155,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Alertmanager.RegisterFlags(f) c.RuntimeConfig.RegisterFlags(f) c.MemberlistKV.RegisterFlags(f, "") + c.QueryScheduler.RegisterFlags(f) // These don't seem to have a home. f.IntVar(&chunk_util.QueryParallelism, "querier.query-parallelism", 100, "Max subqueries run in parallel per higher-level query.") @@ -169,10 +177,10 @@ func (c *Config) Validate(log log.Logger) error { if err := c.Storage.Validate(); err != nil { return errors.Wrap(err, "invalid storage config") } - if err := c.ChunkStore.Validate(); err != nil { + if err := c.ChunkStore.Validate(log); err != nil { return errors.Wrap(err, "invalid chunk store config") } - if err := c.Ruler.Validate(c.LimitsConfig); err != nil { + if err := c.Ruler.Validate(c.LimitsConfig, log); err != nil { return errors.Wrap(err, "invalid ruler config") } if err := c.BlocksStorage.Validate(); err != nil { @@ -202,6 +210,12 @@ func (c *Config) Validate(log log.Logger) error { if err := c.StoreGateway.Validate(c.LimitsConfig); err != nil { return errors.Wrap(err, "invalid store-gateway config") } + if err := c.Compactor.Validate(); err != nil { + return errors.Wrap(err, "invalid compactor config") + } + if err := c.Alertmanager.Validate(); err != nil { + return errors.Wrap(err, "invalid alertmanager config") + } if c.Storage.Engine == storage.StorageEngineBlocks && c.Querier.SecondStoreEngine != storage.StorageEngineChunks && len(c.Schema.Configs) > 0 { level.Warn(log).Log("schema configuration is not used by the blocks storage engine, and will have no effect") @@ -249,21 +263,23 @@ type Cortex struct { ServiceMap map[string]services.Service ModuleManager *modules.Manager - API *api.API - Server *server.Server - Ring *ring.Ring - Overrides *validation.Overrides - Distributor *distributor.Distributor - Ingester *ingester.Ingester - Flusher *flusher.Flusher - Store chunk.Store - DeletesStore *purger.DeleteStore - Frontend *frontend.Frontend - TableManager *chunk.TableManager - Cache cache.Cache - RuntimeConfig *runtimeconfig.Manager - Purger *purger.Purger - TombstonesLoader *purger.TombstonesLoader + API *api.API + Server *server.Server + Ring *ring.Ring + Overrides *validation.Overrides + Distributor *distributor.Distributor + Ingester *ingester.Ingester + Flusher *flusher.Flusher + Store chunk.Store + DeletesStore *purger.DeleteStore + Frontend *frontendv1.Frontend + TableManager *chunk.TableManager + RuntimeConfig *runtimeconfig.Manager + Purger *purger.Purger + TombstonesLoader *purger.TombstonesLoader + QuerierQueryable prom_storage.SampleAndChunkQueryable + QuerierEngine *promql.Engine + QueryFrontendTripperware queryrange.Tripperware Ruler *ruler.Ruler RulerStorage rules.RuleStore @@ -290,11 +306,15 @@ func New(cfg Config) (*Cortex, error) { // Don't check auth header on TransferChunks, as we weren't originally // sending it and this could cause transfers to fail on update. - // - // Also don't check auth /frontend.Frontend/Process, as this handles - // queries for multiple users. cfg.API.HTTPAuthMiddleware = fakeauth.SetupAuthMiddleware(&cfg.Server, cfg.AuthEnabled, - []string{"/cortex.Ingester/TransferChunks", "/frontend.Frontend/Process"}) + // Also don't check auth for these gRPC methods, since single call is used for multiple users (or no user like health check). 
+ []string{ + "/grpc.health.v1.Health/Check", + "/cortex.Ingester/TransferChunks", + "/frontend.Frontend/Process", + "/schedulerpb.SchedulerForFrontend/FrontendLoop", + "/schedulerpb.SchedulerForQuerier/QuerierLoop", + }) cortex := &Cortex{ Cfg: cfg, @@ -318,6 +338,13 @@ func (t *Cortex) setupThanosTracing() { // Run starts Cortex running, and blocks until a Cortex stops. func (t *Cortex) Run() error { + // Register custom process metrics. + if c, err := process.NewProcessCollector(); err == nil { + prometheus.MustRegister(c) + } else { + level.Warn(util.Logger).Log("msg", "skipped registration of custom process metrics collector", "err", err) + } + for _, module := range t.Cfg.Target { if !t.ModuleManager.IsUserVisibleModule(module) { level.Warn(util.Logger).Log("msg", "selected target is an internal module, is this intended?", "target", module) diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index 0bf3ea5c13a31..5ec08f5c72e38 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -5,15 +5,14 @@ import ( "os" "time" + "github.com/NYTimes/gziphandler" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" prom_storage "github.com/prometheus/prometheus/storage" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" - "github.com/weaveworks/common/instrument" - "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" "github.com/cortexproject/cortex/pkg/alertmanager" @@ -26,14 +25,17 @@ import ( "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/flusher" + frontend "github.com/cortexproject/cortex/pkg/frontend" + "github.com/cortexproject/cortex/pkg/frontend/transport" "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/querier/queryrange" + querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/scheduler" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -45,31 +47,36 @@ import ( // The various modules that make up Cortex. 
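The auth middleware is handed an explicit list of gRPC methods that must skip the tenant check, since a single call can serve many tenants or none at all (health checks). A hedged sketch of how such an exemption list can be honoured in a unary server interceptor; `authInterceptor` and the header name are illustrative, not the Cortex implementation:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// authInterceptor enforces a tenant header on every unary call except the
// exempted full method names (e.g. health checks, multi-tenant streams).
func authInterceptor(exempt map[string]bool) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if exempt[info.FullMethod] {
			return handler(ctx, req)
		}
		md, _ := metadata.FromIncomingContext(ctx)
		if len(md.Get("x-scope-orgid")) == 0 {
			return nil, status.Error(codes.Unauthenticated, "no org id")
		}
		return handler(ctx, req)
	}
}

func main() {
	exempt := map[string]bool{
		"/grpc.health.v1.Health/Check": true,
	}
	srv := grpc.NewServer(grpc.UnaryInterceptor(authInterceptor(exempt)))
	fmt.Println(srv != nil) // register services and call Serve() in real code
}
```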
const ( - API string = "api" - Ring string = "ring" - RuntimeConfig string = "runtime-config" - Overrides string = "overrides" - Server string = "server" - Distributor string = "distributor" - DistributorService string = "distributor-service" - Ingester string = "ingester" - IngesterService string = "ingester-service" - Flusher string = "flusher" - Querier string = "querier" - StoreQueryable string = "store-queryable" - QueryFrontend string = "query-frontend" - Store string = "store" - DeleteRequestsStore string = "delete-requests-store" - TableManager string = "table-manager" - RulerStorage string = "ruler-storage" - Ruler string = "ruler" - Configs string = "configs" - AlertManager string = "alertmanager" - Compactor string = "compactor" - StoreGateway string = "store-gateway" - MemberlistKV string = "memberlist-kv" - Purger string = "purger" - All string = "all" + API string = "api" + Ring string = "ring" + RuntimeConfig string = "runtime-config" + Overrides string = "overrides" + Server string = "server" + Distributor string = "distributor" + DistributorService string = "distributor-service" + Ingester string = "ingester" + IngesterService string = "ingester-service" + Flusher string = "flusher" + Querier string = "querier" + Queryable string = "queryable" + StoreQueryable string = "store-queryable" + QueryFrontend string = "query-frontend" + QueryFrontendTripperware string = "query-frontend-tripperware" + Store string = "store" + DeleteRequestsStore string = "delete-requests-store" + TableManager string = "table-manager" + RulerStorage string = "ruler-storage" + Ruler string = "ruler" + Configs string = "configs" + AlertManager string = "alertmanager" + Compactor string = "compactor" + StoreGateway string = "store-gateway" + MemberlistKV string = "memberlist-kv" + ChunksPurger string = "chunks-purger" + BlocksPurger string = "blocks-purger" + Purger string = "purger" + QueryScheduler string = "query-scheduler" + All string = "all" ) func (t *Cortex) initAPI() (services.Service, error) { @@ -190,58 +197,117 @@ func (t *Cortex) initDistributor() (serv services.Service, err error) { return nil, nil } -func (t *Cortex) initQuerier() (serv services.Service, err error) { +// initQueryable instantiates the queryable and promQL engine used to service queries to +// Cortex. It also registers the API endpoints associated with those two services. +func (t *Cortex) initQueryable() (serv services.Service, err error) { querierRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "querier"}, prometheus.DefaultRegisterer) - queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, querierRegisterer) - - // Prometheus histograms for requests to the querier. 
- querierRequestDuration := promauto.With(prometheus.DefaultRegisterer).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "querier_request_duration_seconds", - Help: "Time (in seconds) spent serving HTTP requests to the querier.", - Buckets: instrument.DefBuckets, - }, []string{"method", "route", "status_code", "ws"}) - - receivedMessageSize := promauto.With(prometheus.DefaultRegisterer).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "querier_request_message_bytes", - Help: "Size (in bytes) of messages received in the request to the querier.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - - sentMessageSize := promauto.With(prometheus.DefaultRegisterer).NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "querier_response_message_bytes", - Help: "Size (in bytes) of messages sent in response by the querier.", - Buckets: middleware.BodySizeBuckets, - }, []string{"method", "route"}) - - inflightRequests := promauto.With(prometheus.DefaultRegisterer).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "querier_inflight_requests", - Help: "Current number of inflight requests to the querier.", - }, []string{"method", "route"}) - - // if we are not configured for single binary mode then the querier needs to register its paths externally - registerExternally := !t.Cfg.isModuleEnabled(All) - handler := t.API.RegisterQuerier(queryable, engine, t.Distributor, registerExternally, t.TombstonesLoader, querierRequestDuration, receivedMessageSize, sentMessageSize, inflightRequests) - - // single binary mode requires a properly configured worker. if the operator did not attempt to configure the - // worker we will attempt an automatic configuration here - if t.Cfg.Worker.Address == "" && t.Cfg.isModuleEnabled(All) { - address := fmt.Sprintf("127.0.0.1:%d", t.Cfg.Server.GRPCListenPort) - level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address) - t.Cfg.Worker.Address = address - } - - // Query frontend worker will only be started after all its dependencies are started, not here. - // Worker may also be nil, if not configured, which is OK. - worker, err := frontend.NewWorker(t.Cfg.Worker, t.Cfg.Querier, httpgrpc_server.NewServer(handler), util.Logger) - if err != nil { - return + + // Create a querier queryable and PromQL engine + t.QuerierQueryable, t.QuerierEngine = querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, querierRegisterer) + + // Register the default endpoints that are always enabled for the querier module + t.API.RegisterQueryable(t.QuerierQueryable, t.Distributor) + + return nil, nil +} + +// initQuerier registers an internal HTTP router with a Prometheus API backed by the +// Cortex Queryable. Then it does one of the following: +// +// 1. Query-Frontend Enabled: If Cortex has an All or QueryFrontend target, the internal +// HTTP router is wrapped with Tenant ID parsing middleware and passed to the frontend +// worker. +// +// 2. Querier Standalone: The querier will register the internal HTTP router with the external +// HTTP router for the Prometheus API routes. Then the external HTTP server will be passed +// as a http.Handler to the frontend worker. 
+// +// Route Diagram: +// +// │ query +// │ request +// │ +// ▼ +// ┌──────────────────┐ QF to ┌──────────────────┐ +// │ external HTTP │ Worker │ │ +// │ router │──────────────▶│ frontend worker │ +// │ │ │ │ +// └──────────────────┘ └──────────────────┘ +// │ │ +// │ +// only in │ │ +// microservice ┌──────────────────┐ │ +// querier │ │ internal Querier │ │ +// ─ ─ ─ ─▶│ router │◀─────┘ +// │ │ +// └──────────────────┘ +// │ +// │ +// /metadata & /chunk ┌─────────────────────┼─────────────────────┐ +// requests │ │ │ +// │ │ │ +// ▼ ▼ ▼ +// ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ +// │ │ │ │ │ │ +// │Querier Queryable │ │ /api/v1 router │ │ /api/prom router │ +// │ │ │ │ │ │ +// └──────────────────┘ └──────────────────┘ └──────────────────┘ +// ▲ │ │ +// │ └──────────┬──────────┘ +// │ ▼ +// │ ┌──────────────────┐ +// │ │ │ +// └──────────────────────│ Prometheus API │ +// │ │ +// └──────────────────┘ +// +func (t *Cortex) initQuerier() (serv services.Service, err error) { + // Create a internal HTTP handler that is configured with the Prometheus API routes and points + // to a Prometheus API struct instantiated with the Cortex Queryable. + internalQuerierRouter := api.NewQuerierHandler( + t.Cfg.API, + t.QuerierQueryable, + t.QuerierEngine, + t.Distributor, + t.TombstonesLoader, + prometheus.DefaultRegisterer, + util.Logger, + ) + + // If the querier is running standalone without the query-frontend or query-scheduler, we must register it's internal + // HTTP handler externally and provide the external Cortex Server HTTP handler to the frontend worker + // to ensure requests it processes use the default middleware instrumentation. + if !t.Cfg.isModuleEnabled(QueryFrontend) && !t.Cfg.isModuleEnabled(QueryScheduler) && !t.Cfg.isModuleEnabled(All) { + // First, register the internal querier handler with the external HTTP server + t.API.RegisterQueryAPI(internalQuerierRouter) + + // Second, set the http.Handler that the frontend worker will use to process requests to point to + // the external HTTP server. This will allow the querier to consolidate query metrics both external + // and internal using the default instrumentation when running as a standalone service. + internalQuerierRouter = t.Server.HTTPServer.Handler + } else { + // Single binary mode requires a query frontend endpoint for the worker. If no frontend or scheduler endpoint + // is configured, Cortex will default to using frontend on localhost on it's own GRPC listening port. + if t.Cfg.Worker.FrontendAddress == "" || t.Cfg.Worker.SchedulerAddress == "" { + address := fmt.Sprintf("127.0.0.1:%d", t.Cfg.Server.GRPCListenPort) + level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address) + t.Cfg.Worker.FrontendAddress = address + } + + // If queries are processed using the external HTTP Server, we need wrap the internal querier with + // HTTP router with middleware to parse the tenant ID from the HTTP header and inject it into the + // request context. + internalQuerierRouter = t.API.AuthMiddleware.Wrap(internalQuerierRouter) + } + + // If neither frontend address or scheduler address is configured, no worker is needed. 
+ if t.Cfg.Worker.FrontendAddress == "" && t.Cfg.Worker.SchedulerAddress == "" { + return nil, nil } - return worker, nil + t.Cfg.Worker.MaxConcurrentRequests = t.Cfg.Querier.MaxConcurrent + t.Cfg.Worker.QueryStatsEnabled = t.Cfg.Frontend.Handler.QueryStatsEnabled + return querier_worker.NewQuerierWorker(t.Cfg.Worker, httpgrpc_server.NewServer(internalQuerierRouter), util.Logger, prometheus.DefaultRegisterer) } func (t *Cortex) initStoreQueryables() (services.Service, error) { @@ -374,7 +440,7 @@ func (t *Cortex) initChunkStore() (serv services.Service, err error) { } func (t *Cortex) initDeleteRequestsStore() (serv services.Service, err error) { - if !t.Cfg.PurgerConfig.Enable { + if t.Cfg.Storage.Engine != storage.StorageEngineChunks || !t.Cfg.PurgerConfig.Enable { // until we need to explicitly enable delete series support we need to do create TombstonesLoader without DeleteStore which acts as noop t.TombstonesLoader = purger.NewTombstonesLoader(nil, nil) @@ -399,20 +465,17 @@ func (t *Cortex) initDeleteRequestsStore() (serv services.Service, err error) { return } -func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { +// initQueryFrontendTripperware instantiates the tripperware used by the query frontend +// to optimize Prometheus query requests. +func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err error) { // Load the schema only if sharded queries is set. if t.Cfg.QueryRange.ShardedQueries { - err = t.Cfg.Schema.Load() + err := t.Cfg.Schema.Load() if err != nil { - return + return nil, err } } - t.Frontend, err = frontend.New(t.Cfg.Frontend, t.Overrides, util.Logger, prometheus.DefaultRegisterer) - if err != nil { - return - } - tripperware, cache, err := queryrange.NewTripperware( t.Cfg.QueryRange, util.Logger, @@ -437,21 +500,51 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { if err != nil { return nil, err } - t.Cache = cache - t.Frontend.Wrap(tripperware) - t.API.RegisterQueryFrontend(t.Frontend) + t.QueryFrontendTripperware = tripperware return services.NewIdleService(nil, func(_ error) error { - t.Frontend.Close() - if t.Cache != nil { - t.Cache.Stop() - t.Cache = nil + if cache != nil { + cache.Stop() + cache = nil } return nil }), nil } +func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { + roundTripper, frontendV1, frontendV2, err := frontend.InitFrontend(t.Cfg.Frontend, t.Overrides, t.Cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } + + // Wrap roundtripper into Tripperware. 
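The frontend handler is now built from a plain `http.RoundTripper` that gets wrapped by the query-range Tripperware, which is ordinary round-tripper middleware. A small self-contained sketch of that wrapping shape (the logging middleware and names are invented):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// Tripperware is a middleware over http.RoundTripper, matching the shape
// used by the query frontend: it takes the "next" round tripper and
// returns a wrapped one.
type Tripperware func(http.RoundTripper) http.RoundTripper

type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }

// logDuration is an example Tripperware that times each request.
func logDuration(next http.RoundTripper) http.RoundTripper {
	return roundTripperFunc(func(r *http.Request) (*http.Response, error) {
		start := time.Now()
		resp, err := next.RoundTrip(r)
		log.Printf("%s %s took %s", r.Method, r.URL, time.Since(start))
		return resp, err
	})
}

func main() {
	var rt http.RoundTripper = http.DefaultTransport
	rt = Tripperware(logDuration)(rt) // wrap, like tripperware(roundTripper) above

	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if resp, err := rt.RoundTrip(req); err == nil {
		fmt.Println(resp.Status)
	}
}
```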
+ roundTripper = t.QueryFrontendTripperware(roundTripper) + + handler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util.Logger, prometheus.DefaultRegisterer) + if t.Cfg.Frontend.CompressResponses { + handler = gziphandler.GzipHandler(handler) + } + + t.API.RegisterQueryFrontendHandler(handler) + + if frontendV1 != nil { + t.API.RegisterQueryFrontend1(frontendV1) + t.Frontend = frontendV1 + + return services.NewIdleService(nil, func(_ error) error { + frontendV1.Close() + return nil + }), nil + } else if frontendV2 != nil { + t.API.RegisterQueryFrontend2(frontendV2) + + return frontendV2, nil + } + + return nil, nil +} + func (t *Cortex) initTableManager() (services.Service, error) { if t.Cfg.Storage.Engine == storage.StorageEngineBlocks { return nil, nil // table manager isn't used in v2 @@ -635,8 +728,8 @@ func (t *Cortex) initMemberlistKV() (services.Service, error) { return t.MemberlistKV, nil } -func (t *Cortex) initPurger() (services.Service, error) { - if !t.Cfg.PurgerConfig.Enable { +func (t *Cortex) initChunksPurger() (services.Service, error) { + if t.Cfg.Storage.Engine != storage.StorageEngineChunks || !t.Cfg.PurgerConfig.Enable { return nil, nil } @@ -650,11 +743,35 @@ func (t *Cortex) initPurger() (services.Service, error) { return nil, err } - t.API.RegisterPurger(t.DeletesStore, t.Cfg.PurgerConfig.DeleteRequestCancelPeriod) + t.API.RegisterChunksPurger(t.DeletesStore, t.Cfg.PurgerConfig.DeleteRequestCancelPeriod) return t.Purger, nil } +func (t *Cortex) initBlocksPurger() (services.Service, error) { + if t.Cfg.Storage.Engine != storage.StorageEngineBlocks { + return nil, nil + } + + purgerAPI, err := purger.NewBlocksPurgerAPI(t.Cfg.BlocksStorage, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } + + t.API.RegisterBlocksPurger(purgerAPI) + return nil, nil +} + +func (t *Cortex) initQueryScheduler() (services.Service, error) { + s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return nil, errors.Wrap(err, "query-scheduler init") + } + + t.API.RegisterQueryScheduler(s) + return s, nil +} + func (t *Cortex) setupModuleManager() error { mm := modules.NewManager() @@ -673,8 +790,10 @@ func (t *Cortex) setupModuleManager() error { mm.RegisterModule(Ingester, t.initIngester) mm.RegisterModule(IngesterService, t.initIngesterService, modules.UserInvisibleModule) mm.RegisterModule(Flusher, t.initFlusher) + mm.RegisterModule(Queryable, t.initQueryable, modules.UserInvisibleModule) mm.RegisterModule(Querier, t.initQuerier) mm.RegisterModule(StoreQueryable, t.initStoreQueryables, modules.UserInvisibleModule) + mm.RegisterModule(QueryFrontendTripperware, t.initQueryFrontendTripperware, modules.UserInvisibleModule) mm.RegisterModule(QueryFrontend, t.initQueryFrontend) mm.RegisterModule(TableManager, t.initTableManager) mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule) @@ -683,31 +802,39 @@ func (t *Cortex) setupModuleManager() error { mm.RegisterModule(AlertManager, t.initAlertManager) mm.RegisterModule(Compactor, t.initCompactor) mm.RegisterModule(StoreGateway, t.initStoreGateway) - mm.RegisterModule(Purger, t.initPurger) + mm.RegisterModule(ChunksPurger, t.initChunksPurger, modules.UserInvisibleModule) + mm.RegisterModule(BlocksPurger, t.initBlocksPurger, modules.UserInvisibleModule) + mm.RegisterModule(Purger, nil) + mm.RegisterModule(QueryScheduler, t.initQueryScheduler) mm.RegisterModule(All, nil) // Add dependencies deps := 
map[string][]string{ - API: {Server}, - Ring: {API, RuntimeConfig, MemberlistKV}, - Overrides: {RuntimeConfig}, - Distributor: {DistributorService, API}, - DistributorService: {Ring, Overrides}, - Store: {Overrides, DeleteRequestsStore}, - Ingester: {IngesterService, API}, - IngesterService: {Overrides, Store, RuntimeConfig, MemberlistKV}, - Flusher: {Store, API}, - Querier: {Overrides, DistributorService, Store, Ring, API, StoreQueryable, MemberlistKV}, - StoreQueryable: {Overrides, Store, MemberlistKV}, - QueryFrontend: {API, Overrides, DeleteRequestsStore}, - TableManager: {API}, - Ruler: {Overrides, DistributorService, Store, StoreQueryable, RulerStorage}, - Configs: {API}, - AlertManager: {API}, - Compactor: {API, MemberlistKV}, - StoreGateway: {API, Overrides, MemberlistKV}, - Purger: {Store, DeleteRequestsStore, API}, - All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, + API: {Server}, + Ring: {API, RuntimeConfig, MemberlistKV}, + Overrides: {RuntimeConfig}, + Distributor: {DistributorService, API}, + DistributorService: {Ring, Overrides}, + Store: {Overrides, DeleteRequestsStore}, + Ingester: {IngesterService, API}, + IngesterService: {Overrides, Store, RuntimeConfig, MemberlistKV}, + Flusher: {Store, API}, + Queryable: {Overrides, DistributorService, Store, Ring, API, StoreQueryable, MemberlistKV}, + Querier: {Queryable}, + StoreQueryable: {Overrides, Store, MemberlistKV}, + QueryFrontendTripperware: {API, Overrides, DeleteRequestsStore}, + QueryFrontend: {QueryFrontendTripperware}, + QueryScheduler: {API, Overrides}, + TableManager: {API}, + Ruler: {Overrides, DistributorService, Store, StoreQueryable, RulerStorage}, + Configs: {API}, + AlertManager: {API}, + Compactor: {API, MemberlistKV}, + StoreGateway: {API, Overrides, MemberlistKV}, + ChunksPurger: {Store, DeleteRequestsStore, API}, + BlocksPurger: {Store, API}, + Purger: {ChunksPurger, BlocksPurger}, + All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, } for mod, targets := range deps { if err := mm.AddDependency(mod, targets...); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index 2b72301ab72c7..d1445c801a753 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/scrape" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/instrument" @@ -25,6 +26,7 @@ import ( "github.com/cortexproject/cortex/pkg/prom1/storage/metric" "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/limiter" @@ -383,7 +385,7 @@ func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, user // Push implements client.IngesterServer func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { 
return nil, err } @@ -446,6 +448,11 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie latestSampleTimestampMs = util.Max64(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) } + if mrc := d.limits.MetricRelabelConfigs(userID); len(mrc) > 0 { + l := relabel.Process(client.FromLabelAdaptersToLabels(ts.Labels), mrc...) + ts.Labels = client.FromLabelsToLabelAdapters(l) + } + // If we found both the cluster and replica labels, we only want to include the cluster label when // storing series in Cortex. If we kept the replica label we would end up with another series for the same // series we're trying to dedupe when HA tracking moves over to a different replica. @@ -504,7 +511,7 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie continue } - metadataKeys = append(metadataKeys, d.tokenForMetadata(userID, m.MetricName)) + metadataKeys = append(metadataKeys, d.tokenForMetadata(userID, m.MetricFamilyName)) validatedMetadata = append(validatedMetadata, m) } @@ -637,14 +644,16 @@ func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring } // LabelValuesForLabelName returns all of the label values that are associated with a given label name. -func (d *Distributor) LabelValuesForLabelName(ctx context.Context, labelName model.LabelName) ([]string, error) { +func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, labelName model.LabelName) ([]string, error) { replicationSet, err := d.GetIngestersForMetadata(ctx) if err != nil { return nil, err } req := &client.LabelValuesRequest{ - LabelName: string(labelName), + LabelName: string(labelName), + StartTimestampMs: int64(from), + EndTimestampMs: int64(to), } resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { return client.LabelValues(ctx, req) @@ -664,17 +673,24 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, labelName mod for v := range valueSet { values = append(values, v) } + + // We need the values returned to be sorted. + sort.Strings(values) + return values, nil } // LabelNames returns all of the label names. 
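The distributor now applies a tenant's `metric_relabel_configs` to each incoming series (via `relabel.Process`, as above) before hashing and forwarding it to ingesters. A short sketch of what such a relabel rule does to a label set; the drop rule is an arbitrary example, not a Cortex default:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/relabel"
)

func main() {
	lbls := labels.FromStrings("__name__", "http_requests_total", "env", "dev", "job", "api")

	// Drop any series whose "env" label matches dev.* (illustrative rule).
	drop := &relabel.Config{
		SourceLabels: model.LabelNames{"env"},
		Regex:        relabel.MustNewRegexp("dev.*"),
		Action:       relabel.Drop,
	}

	out := relabel.Process(lbls, drop)
	fmt.Println(out) // empty label set: the series was dropped
}
```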
-func (d *Distributor) LabelNames(ctx context.Context) ([]string, error) { +func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time) ([]string, error) { replicationSet, err := d.GetIngestersForMetadata(ctx) if err != nil { return nil, err } - req := &client.LabelNamesRequest{} + req := &client.LabelNamesRequest{ + StartTimestampMs: int64(from), + EndTimestampMs: int64(to), + } resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { return client.LabelNames(ctx, req) }) @@ -693,9 +709,8 @@ func (d *Distributor) LabelNames(ctx context.Context) ([]string, error) { for v := range valueSet { values = append(values, v) } - sort.Slice(values, func(i, j int) bool { - return values[i] < values[j] - }) + + sort.Strings(values) return values, nil } @@ -765,7 +780,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad dedupTracker[*m] = struct{}{} result = append(result, scrape.MetricMetadata{ - Metric: m.MetricName, + Metric: m.MetricFamilyName, Help: m.Help, Unit: m.Unit, Type: client.MetricMetadataMetricTypeToMetricType(m.GetType()), @@ -824,7 +839,7 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) { req := &client.UserStatsRequest{} ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID // Not using d.ForReplicationSet(), so we can fail after first error. - replicationSet, err := d.ingestersRing.GetAll(ring.Read) + replicationSet, err := d.ingestersRing.GetAllHealthy(ring.Read) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go index 6eb7e9f5f29cf..af77a5d98bced 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go @@ -76,7 +76,7 @@ func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { lc.Port = cfg.InstancePort lc.ID = cfg.InstanceID lc.InfNames = cfg.InstanceInterfaceNames - lc.SkipUnregister = false + lc.UnregisterOnShutdown = true lc.HeartbeatPeriod = cfg.HeartbeatPeriod lc.ObservePeriod = 0 lc.NumTokens = 1 diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go index c6fd3aa687924..9628fc6432339 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go @@ -9,11 +9,11 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/weaveworks/common/instrument" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/ingester/client" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" grpc_util "github.com/cortexproject/cortex/pkg/util/grpc" @@ -76,7 +76,7 @@ func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matc // GetIngestersForQuery returns a replication set including all ingesters that should be queried // to fetch series matching input label matchers. 
func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*labels.Matcher) (ring.ReplicationSet, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return ring.ReplicationSet{}, err } @@ -88,7 +88,7 @@ func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*lab lookbackPeriod := d.cfg.ShuffleShardingLookbackPeriod if shardSize > 0 && lookbackPeriod > 0 { - return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetAll(ring.Read) + return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetReplicationSetForOperation(ring.Read) } } @@ -101,13 +101,13 @@ func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*lab } } - return d.ingestersRing.GetAll(ring.Read) + return d.ingestersRing.GetReplicationSetForOperation(ring.Read) } // GetIngestersForMetadata returns a replication set including all ingesters that should be queried // to fetch metadata (eg. label names/values or series). func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.ReplicationSet, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return ring.ReplicationSet{}, err } @@ -119,11 +119,11 @@ func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.Replica lookbackPeriod := d.cfg.ShuffleShardingLookbackPeriod if shardSize > 0 && lookbackPeriod > 0 { - return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetAll(ring.Read) + return d.ingestersRing.ShuffleShardWithLookback(userID, shardSize, lookbackPeriod, time.Now()).GetReplicationSetForOperation(ring.Read) } } - return d.ingestersRing.GetAll(ring.Read) + return d.ingestersRing.GetReplicationSetForOperation(ring.Read) } // queryIngesters queries the ingesters via the older, sample-based API. diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/config.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/config.go new file mode 100644 index 0000000000000..88775e1ed525f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/config.go @@ -0,0 +1,80 @@ +package frontend + +import ( + "flag" + "net/http" + + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/frontend/transport" + v1 "github.com/cortexproject/cortex/pkg/frontend/v1" + v2 "github.com/cortexproject/cortex/pkg/frontend/v2" + "github.com/cortexproject/cortex/pkg/util" +) + +// This struct combines several configuration options together to preserve backwards compatibility. +type CombinedFrontendConfig struct { + Handler transport.HandlerConfig `yaml:",inline"` + FrontendV1 v1.Config `yaml:",inline"` + FrontendV2 v2.Config `yaml:",inline"` + + // Deprecated. Replaced with pkg/api/Config.ResponseCompression field. + // TODO: To be removed in Cortex 1.8. + CompressResponses bool `yaml:"compress_responses"` + + DownstreamURL string `yaml:"downstream_url"` +} + +func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { + cfg.Handler.RegisterFlags(f) + cfg.FrontendV1.RegisterFlags(f) + cfg.FrontendV2.RegisterFlags(f) + + f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "This flag is about to be deprecated. 
Please use -api.response-compression-enabled instead.") + + f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") +} + +// Initializes frontend (either V1 -- without scheduler, or V2 -- with scheduler) or no frontend at +// all if downstream Prometheus URL is used instead. +// +// Returned RoundTripper can be wrapped in more round-tripper middlewares, and then eventually registered +// into HTTP server using the Handler from this package. Returned RoundTripper is always non-nil +// (if there are no errors), and it uses the returned frontend (if any). +func InitFrontend(cfg CombinedFrontendConfig, limits v1.Limits, grpcListenPort int, log log.Logger, reg prometheus.Registerer) (http.RoundTripper, *v1.Frontend, *v2.Frontend, error) { + switch { + case cfg.DownstreamURL != "": + // If the user has specified a downstream Prometheus, then we should use that. + rt, err := NewDownstreamRoundTripper(cfg.DownstreamURL) + return rt, nil, nil, err + + case cfg.FrontendV2.SchedulerAddress != "": + // If query-scheduler address is configured, use Frontend. + if cfg.FrontendV2.Addr == "" { + addr, err := util.GetFirstAddressOf(cfg.FrontendV2.InfNames) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "failed to get frontend address") + } + + cfg.FrontendV2.Addr = addr + } + + if cfg.FrontendV2.Port == 0 { + cfg.FrontendV2.Port = grpcListenPort + } + + fr, err := v2.NewFrontend(cfg.FrontendV2, log, reg) + return transport.AdaptGrpcRoundTripperToHTTPRoundTripper(fr), nil, fr, err + + default: + // No scheduler = use original frontend. + fr, err := v1.New(cfg.FrontendV1, limits, log, reg) + if err != nil { + return nil, nil, nil, err + } + + return transport.AdaptGrpcRoundTripperToHTTPRoundTripper(fr), fr, nil, err + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go new file mode 100644 index 0000000000000..f0c342eac4a8c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/downstream_roundtripper.go @@ -0,0 +1,40 @@ +package frontend + +import ( + "net/http" + "net/url" + "path" + + "github.com/opentracing/opentracing-go" +) + +// RoundTripper that forwards requests to downstream URL. 
+type downstreamRoundTripper struct { + downstreamURL *url.URL +} + +func NewDownstreamRoundTripper(downstreamURL string) (http.RoundTripper, error) { + u, err := url.Parse(downstreamURL) + if err != nil { + return nil, err + } + + return &downstreamRoundTripper{downstreamURL: u}, nil +} + +func (d downstreamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(r.Context()) + if tracer != nil && span != nil { + carrier := opentracing.HTTPHeadersCarrier(r.Header) + err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) + if err != nil { + return nil, err + } + } + + r.URL.Scheme = d.downstreamURL.Scheme + r.URL.Host = d.downstreamURL.Host + r.URL.Path = path.Join(d.downstreamURL.Path, r.URL.Path) + r.Host = "" + return http.DefaultTransport.RoundTrip(r) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go new file mode 100644 index 0000000000000..a043588e89f3d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go @@ -0,0 +1,202 @@ +package transport + +import ( + "bytes" + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/httpgrpc/server" + + querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // StatusClientClosedRequest is the status code for when a client request cancellation of an http request + StatusClientClosedRequest = 499 +) + +var ( + errCanceled = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error()) + errDeadlineExceeded = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error()) + errRequestEntityTooLarge = httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "http: request body too large") +) + +// Config for a Handler. +type HandlerConfig struct { + LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` + MaxBodySize int64 `yaml:"max_body_size"` + QueryStatsEnabled bool `yaml:"query_stats_enabled"` +} + +func (cfg *HandlerConfig) RegisterFlags(f *flag.FlagSet) { + f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.") + f.Int64Var(&cfg.MaxBodySize, "frontend.max-body-size", 10*1024*1024, "Max body size for downstream prometheus.") + f.BoolVar(&cfg.QueryStatsEnabled, "frontend.query-stats-enabled", false, "True to enable query statistics tracking. When enabled, a message with some statistics is logged for every query. This configuration option must be set both on query-frontend and querier.") +} + +// Handler accepts queries and forwards them to RoundTripper. It can log slow queries, +// but all other logic is inside the RoundTripper. +type Handler struct { + cfg HandlerConfig + log log.Logger + roundTripper http.RoundTripper + + // Metrics. + querySeconds *prometheus.CounterVec +} + +// New creates a new frontend handler. 
+func NewHandler(cfg HandlerConfig, roundTripper http.RoundTripper, log log.Logger, reg prometheus.Registerer) http.Handler { + h := &Handler{ + cfg: cfg, + log: log, + roundTripper: roundTripper, + } + + if cfg.QueryStatsEnabled { + h.querySeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_query_seconds_total", + Help: "Total amount of wall clock time spend processing queries.", + }, []string{"user"}) + } + + return h +} + +func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var ( + stats *querier_stats.Stats + queryString url.Values + ) + + // Initialise the stats in the context and make sure it's propagated + // down the request chain. + if f.cfg.QueryStatsEnabled { + var ctx context.Context + stats, ctx = querier_stats.ContextWithEmptyStats(r.Context()) + r = r.WithContext(ctx) + } + + defer func() { + _ = r.Body.Close() + }() + + // Buffer the body for later use to track slow queries. + var buf bytes.Buffer + r.Body = http.MaxBytesReader(w, r.Body, f.cfg.MaxBodySize) + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &buf)) + + startTime := time.Now() + resp, err := f.roundTripper.RoundTrip(r) + queryResponseTime := time.Since(startTime) + + if err != nil { + writeError(w, err) + return + } + + hs := w.Header() + for h, vs := range resp.Header { + hs[h] = vs + } + + w.WriteHeader(resp.StatusCode) + // we don't check for copy error as there is no much we can do at this point + _, _ = io.Copy(w, resp.Body) + + // Check whether we should parse the query string. + shouldReportSlowQuery := f.cfg.LogQueriesLongerThan > 0 && queryResponseTime > f.cfg.LogQueriesLongerThan + if shouldReportSlowQuery || f.cfg.QueryStatsEnabled { + queryString = f.parseRequestQueryString(r, buf) + } + + if shouldReportSlowQuery { + f.reportSlowQuery(r, queryString, queryResponseTime) + } + if f.cfg.QueryStatsEnabled { + f.reportQueryStats(r, queryString, queryResponseTime, stats) + } +} + +// reportSlowQuery reports slow queries. +func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, queryResponseTime time.Duration) { + logMessage := append([]interface{}{ + "msg", "slow query detected", + "method", r.Method, + "host", r.Host, + "path", r.URL.Path, + "time_taken", queryResponseTime.String(), + }, formatQueryString(queryString)...) + + level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) +} + +func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.Stats) { + userID, err := tenant.TenantID(r.Context()) + if err != nil { + return + } + + // Track stats. + f.querySeconds.WithLabelValues(userID).Add(stats.LoadWallTime().Seconds()) + + // Log stats. + logMessage := append([]interface{}{ + "msg", "query stats", + "method", r.Method, + "path", r.URL.Path, + "response_time", queryResponseTime, + "query_wall_time", stats.LoadWallTime(), + }, formatQueryString(queryString)...) + + level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) +} + +func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) url.Values { + // Use previously buffered body. 
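+	// ServeHTTP tee'd the body into bodyBuf before round-tripping, and the original
+	// body has already been consumed by the downstream request; restoring it here
+	// lets ParseForm see POST parameters when logging slow queries or query stats.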
+ r.Body = ioutil.NopCloser(&bodyBuf) + + // Ensure the form has been parsed so all the parameters are present + err := r.ParseForm() + if err != nil { + level.Warn(util.WithContext(r.Context(), f.log)).Log("msg", "unable to parse request form", "err", err) + return nil + } + + return r.Form +} + +func formatQueryString(queryString url.Values) (fields []interface{}) { + for k, v := range queryString { + fields = append(fields, fmt.Sprintf("param_%s", k), strings.Join(v, ",")) + } + return fields +} + +func writeError(w http.ResponseWriter, err error) { + switch err { + case context.Canceled: + err = errCanceled + case context.DeadlineExceeded: + err = errDeadlineExceeded + default: + if strings.Contains(err.Error(), "http: request body too large") { + err = errRequestEntityTooLarge + } + } + server.WriteError(w, err) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go new file mode 100644 index 0000000000000..065d7fdca6e1c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/roundtripper.go @@ -0,0 +1,47 @@ +package transport + +import ( + "bytes" + "context" + "io/ioutil" + "net/http" + + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/httpgrpc/server" +) + +// GrpcRoundTripper is similar to http.RoundTripper, but works with HTTP requests converted to protobuf messages. +type GrpcRoundTripper interface { + RoundTripGRPC(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) +} + +func AdaptGrpcRoundTripperToHTTPRoundTripper(r GrpcRoundTripper) http.RoundTripper { + return &grpcRoundTripperAdapter{roundTripper: r} +} + +// This adapter wraps GrpcRoundTripper and converted it into http.RoundTripper +type grpcRoundTripperAdapter struct { + roundTripper GrpcRoundTripper +} + +func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, error) { + req, err := server.HTTPRequest(r) + if err != nil { + return nil, err + } + + resp, err := a.roundTripper.RoundTripGRPC(r.Context(), req) + if err != nil { + return nil, err + } + + httpResp := &http.Response{ + StatusCode: int(resp.Code), + Body: ioutil.NopCloser(bytes.NewReader(resp.Body)), + Header: http.Header{}, + } + for _, h := range resp.Headers { + httpResp.Header[h.Key] = h.Values + } + return httpResp, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go new file mode 100644 index 0000000000000..385ec26e9fa09 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go @@ -0,0 +1,288 @@ +package v1 + +import ( + "context" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + + "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" + "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/scheduler/queue" + "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util/grpcutil" +) + +var ( + errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests") +) + +// Config for a Frontend. 
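+//
+// An illustrative example of raising the per-tenant queue limit via the flag
+// registered below (the value 500 is an assumption, not a recommendation):
+//
+//	-querier.max-outstanding-requests-per-tenant=500
+//
+// Requests over the limit are rejected with the HTTP 429 carried by errTooManyRequest.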
+type Config struct { + MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&cfg.MaxOutstandingPerTenant, "querier.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per frontend; requests beyond this error with HTTP 429.") +} + +type Limits interface { + // Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. + MaxQueriersPerUser(user string) int +} + +// Frontend queues HTTP requests, dispatches them to backends, and handles retries +// for requests which failed. +type Frontend struct { + cfg Config + log log.Logger + limits Limits + + requestQueue *queue.RequestQueue + + // Metrics. + numClients prometheus.GaugeFunc + queueDuration prometheus.Histogram +} + +type request struct { + enqueueTime time.Time + queueSpan opentracing.Span + originalCtx context.Context + + request *httpgrpc.HTTPRequest + err chan error + response chan *httpgrpc.HTTPResponse +} + +// New creates a new frontend. +func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Frontend, error) { + queueLength := promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_query_frontend_queue_length", + Help: "Number of queries in the queue.", + }, []string{"user"}) + + f := &Frontend{ + cfg: cfg, + log: log, + limits: limits, + requestQueue: queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, queueLength), + queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_query_frontend_queue_duration_seconds", + Help: "Time spend by requests queued.", + Buckets: prometheus.DefBuckets, + }), + } + + f.numClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_query_frontend_connected_clients", + Help: "Number of worker clients currently connected to the frontend.", + }, f.requestQueue.GetConnectedQuerierWorkersMetric) + + return f, nil +} + +// Close stops new requests and errors out any pending requests. +func (f *Frontend) Close() { + f.requestQueue.Stop() +} + +// RoundTripGRPC round trips a proto (instead of a HTTP request). +func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { + // Propagate trace context in gRPC too - this will be ignored if using HTTP. + tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) + if tracer != nil && span != nil { + carrier := (*grpcutil.HttpgrpcHeadersCarrier)(req) + err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) + if err != nil { + return nil, err + } + } + + request := request{ + request: req, + originalCtx: ctx, + + // Buffer of 1 to ensure response can be written by the server side + // of the Process stream, even if this goroutine goes away due to + // client context cancellation. + err: make(chan error, 1), + response: make(chan *httpgrpc.HTTPResponse, 1), + } + + if err := f.queueRequest(ctx, &request); err != nil { + return nil, err + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + + case resp := <-request.response: + return resp, nil + + case err := <-request.err: + return nil, err + } +} + +// Process allows backends to pull requests from the frontend. 
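+//
+// Roughly, the querier-worker side of this stream behaves like the sketch below
+// (a simplification for illustration, not the actual worker implementation;
+// execute and querierID are assumed helpers/values):
+//
+//	c, err := client.Process(ctx) // bidirectional stream to the frontend
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		msg, err := c.Recv() // either the GET_ID handshake or an HTTP_REQUEST
+//		if err != nil {
+//			return err
+//		}
+//		switch msg.Type {
+//		case frontendv1pb.GET_ID:
+//			err = c.Send(&frontendv1pb.ClientToFrontend{ClientID: querierID})
+//		case frontendv1pb.HTTP_REQUEST:
+//			resp, stats := execute(msg.HttpRequest)
+//			err = c.Send(&frontendv1pb.ClientToFrontend{HttpResponse: resp, Stats: stats})
+//		}
+//		if err != nil {
+//			return err
+//		}
+//	}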
+func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error { + querierID, err := getQuerierID(server) + if err != nil { + return err + } + + f.requestQueue.RegisterQuerierConnection(querierID) + defer f.requestQueue.UnregisterQuerierConnection(querierID) + + // If the downstream request(from querier -> frontend) is cancelled, + // we need to ping the condition variable to unblock getNextRequestForQuerier. + // Ideally we'd have ctx aware condition variables... + go func() { + <-server.Context().Done() + f.requestQueue.QuerierDisconnecting() + }() + + lastUserIndex := queue.FirstUser() + + for { + reqWrapper, idx, err := f.requestQueue.GetNextRequestForQuerier(server.Context(), lastUserIndex, querierID) + if err != nil { + return err + } + lastUserIndex = idx + + req := reqWrapper.(*request) + + f.queueDuration.Observe(time.Since(req.enqueueTime).Seconds()) + req.queueSpan.Finish() + + /* + We want to dequeue the next unexpired request from the chosen tenant queue. + The chance of choosing a particular tenant for dequeueing is (1/active_tenants). + This is problematic under load, especially with other middleware enabled such as + querier.split-by-interval, where one request may fan out into many. + If expired requests aren't exhausted before checking another tenant, it would take + n_active_tenants * n_expired_requests_at_front_of_queue requests being processed + before an active request was handled for the tenant in question. + If this tenant meanwhile continued to queue requests, + it's possible that it's own queue would perpetually contain only expired requests. + */ + if req.originalCtx.Err() != nil { + lastUserIndex = lastUserIndex.ReuseLastUser() + continue + } + + // Handle the stream sending & receiving on a goroutine so we can + // monitoring the contexts in a select and cancel things appropriately. + resps := make(chan *frontendv1pb.ClientToFrontend, 1) + errs := make(chan error, 1) + go func() { + err = server.Send(&frontendv1pb.FrontendToClient{ + Type: frontendv1pb.HTTP_REQUEST, + HttpRequest: req.request, + }) + if err != nil { + errs <- err + return + } + + resp, err := server.Recv() + if err != nil { + errs <- err + return + } + + resps <- resp + }() + + select { + // If the upstream request is cancelled, we need to cancel the + // downstream req. Only way we can do that is to close the stream. + // The worker client is expecting this semantics. + case <-req.originalCtx.Done(): + return req.originalCtx.Err() + + // Is there was an error handling this request due to network IO, + // then error out this upstream request _and_ stream. + case err := <-errs: + req.err <- err + return err + + // Happy path: merge the stats and propagate the response. + case resp := <-resps: + if stats.ShouldTrackHTTPGRPCResponse(resp.HttpResponse) { + stats := stats.FromContext(req.originalCtx) + stats.Merge(resp.Stats) // Safe if stats is nil. + } + + req.response <- resp.HttpResponse + } + } +} + +func getQuerierID(server frontendv1pb.Frontend_ProcessServer) (string, error) { + err := server.Send(&frontendv1pb.FrontendToClient{ + Type: frontendv1pb.GET_ID, + // Old queriers don't support GET_ID, and will try to use the request. + // To avoid confusing them, include dummy request. + HttpRequest: &httpgrpc.HTTPRequest{ + Method: "GET", + Url: "/invalid_request_sent_by_frontend", + }, + }) + + if err != nil { + return "", err + } + + resp, err := server.Recv() + + // Old queriers will return empty string, which is fine. 
All old queriers will be + // treated as single querier with lot of connections. + // (Note: if resp is nil, GetClientID() returns "") + return resp.GetClientID(), err +} + +func (f *Frontend) queueRequest(ctx context.Context, req *request) error { + userID, err := tenant.TenantID(ctx) + if err != nil { + return err + } + + req.enqueueTime = time.Now() + req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued") + + maxQueriers := f.limits.MaxQueriersPerUser(userID) + + err = f.requestQueue.EnqueueRequest(userID, req, maxQueriers, nil) + if err == queue.ErrTooManyRequests { + return errTooManyRequest + } + return err +} + +// CheckReady determines if the query frontend is ready. Function parameters/return +// chosen to match the same method in the ingester +func (f *Frontend) CheckReady(_ context.Context) error { + // if we have more than one querier connected we will consider ourselves ready + connectedClients := f.requestQueue.GetConnectedQuerierWorkersMetric() + if connectedClients > 0 { + return nil + } + + msg := fmt.Sprintf("not ready: number of queriers connected to query-frontend is %d", int64(connectedClients)) + level.Info(f.log).Log("msg", msg) + return errors.New(msg) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.pb.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go similarity index 82% rename from vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.pb.go rename to vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go index 841fc7148cb68..2fae54a845a3f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go @@ -1,11 +1,15 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: frontend.proto -package frontend +// Protobuf package should not be changed when moving around go packages +// in order to not break backward compatibility. 
+ +package frontendv1pb import ( context "context" fmt "fmt" + stats "github.com/cortexproject/cortex/pkg/querier/stats" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" httpgrpc "github.com/weaveworks/common/httpgrpc" @@ -106,6 +110,7 @@ func (m *FrontendToClient) GetType() Type { type ClientToFrontend struct { HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` ClientID string `protobuf:"bytes,2,opt,name=clientID,proto3" json:"clientID,omitempty"` + Stats *stats.Stats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` } func (m *ClientToFrontend) Reset() { *m = ClientToFrontend{} } @@ -154,6 +159,13 @@ func (m *ClientToFrontend) GetClientID() string { return "" } +func (m *ClientToFrontend) GetStats() *stats.Stats { + if m != nil { + return m.Stats + } + return nil +} + func init() { proto.RegisterEnum("frontend.Type", Type_name, Type_value) proto.RegisterType((*FrontendToClient)(nil), "frontend.FrontendToClient") @@ -163,30 +175,34 @@ func init() { func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } var fileDescriptor_eca3873955a29cfe = []byte{ - // 362 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xb1, 0x4e, 0x2a, 0x41, - 0x14, 0x86, 0x67, 0x6e, 0x08, 0x97, 0x3b, 0x10, 0xb2, 0x99, 0xe4, 0xde, 0x90, 0x2d, 0x26, 0x64, - 0x73, 0x0b, 0x62, 0xe2, 0xae, 0x41, 0x13, 0x13, 0x0b, 0x0b, 0x05, 0x91, 0x0e, 0x97, 0xb1, 0xb1, - 0x21, 0xb2, 0x0e, 0x0b, 0x2a, 0x7b, 0xd6, 0xdd, 0x41, 0x42, 0xe7, 0x23, 0xf8, 0x18, 0x3e, 0x8a, - 0x25, 0x25, 0xa5, 0x0c, 0x8d, 0x25, 0x8f, 0x60, 0x98, 0x85, 0x15, 0xe9, 0xce, 0x9f, 0xff, 0x3f, - 0xe7, 0x3b, 0x33, 0x87, 0x14, 0x7b, 0x11, 0x04, 0x52, 0x04, 0x77, 0x76, 0x18, 0x81, 0x04, 0x9a, - 0xdb, 0x68, 0x73, 0xdf, 0x1f, 0xc8, 0xfe, 0xa8, 0x6b, 0x7b, 0x30, 0x74, 0x7c, 0xf0, 0xc1, 0xd1, - 0x81, 0xee, 0xa8, 0xa7, 0x95, 0x16, 0xba, 0x4a, 0x1a, 0xcd, 0xa3, 0xad, 0xf8, 0x58, 0xdc, 0x3e, - 0x8b, 0x31, 0x44, 0x0f, 0xb1, 0xe3, 0xc1, 0x70, 0x08, 0x81, 0xd3, 0x97, 0x32, 0xf4, 0xa3, 0xd0, - 0x4b, 0x8b, 0xa4, 0xcb, 0x02, 0x62, 0x5c, 0xac, 0x81, 0x1c, 0xce, 0x1f, 0x07, 0x22, 0x90, 0xf4, - 0x98, 0xe4, 0x57, 0x29, 0x57, 0x3c, 0x8d, 0x44, 0x2c, 0x4b, 0xb8, 0x8c, 0x2b, 0xf9, 0xea, 0x5f, - 0x3b, 0xed, 0xbc, 0xe4, 0xbc, 0xb5, 0x36, 0xdd, 0xed, 0x24, 0xb5, 0x48, 0x46, 0x4e, 0x42, 0x51, - 0xfa, 0x55, 0xc6, 0x95, 0x62, 0xb5, 0x68, 0xa7, 0x4f, 0xe3, 0x93, 0x50, 0xb8, 0xda, 0xb3, 0xee, - 0x89, 0x91, 0x60, 0x38, 0x6c, 0xc0, 0xf4, 0x84, 0x14, 0x92, 0x31, 0x71, 0x08, 0x41, 0x2c, 0xd6, - 0xc4, 0x7f, 0xbb, 0xc4, 0xc4, 0x75, 0x7f, 0x64, 0xa9, 0x49, 0x72, 0x9e, 0x9e, 0xd7, 0xac, 0x69, - 0xee, 0x1f, 0x37, 0xd5, 0x7b, 0xff, 0x49, 0x66, 0x45, 0xa6, 0x06, 0x29, 0xac, 0x26, 0x74, 0xdc, - 0xfa, 0xd5, 0x75, 0xbd, 0xcd, 0x0d, 0x44, 0x09, 0xc9, 0x36, 0xea, 0xbc, 0xd3, 0xac, 0x19, 0xb8, - 0xda, 0x26, 0xb9, 0x74, 0x93, 0x06, 0xf9, 0xdd, 0x8a, 0xc0, 0x13, 0x71, 0x4c, 0xcd, 0xef, 0xf5, - 0x77, 0x17, 0x36, 0xb7, 0xbc, 0xdd, 0xdf, 0xb3, 0x50, 0x05, 0x1f, 0xe0, 0xb3, 0xd3, 0xe9, 0x9c, - 0xa1, 0xd9, 0x9c, 0xa1, 0xe5, 0x9c, 0xe1, 0x17, 0xc5, 0xf0, 0x9b, 0x62, 0xf8, 0x5d, 0x31, 0x3c, - 0x55, 0x0c, 0x7f, 0x28, 0x86, 0x3f, 0x15, 0x43, 0x4b, 0xc5, 0xf0, 0xeb, 0x82, 0xa1, 0xe9, 0x82, - 0xa1, 0xd9, 0x82, 0xa1, 0x9b, 0xf4, 0xf8, 0xdd, 0xac, 0x3e, 0xcf, 0xe1, 0x57, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x14, 0x4d, 0x19, 0x1d, 0x1f, 0x02, 0x00, 0x00, + // 419 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0x6d, 0x18, 0xa3, 0x78, 0x51, 0x15, 0x59, 0x02, 0x55, 0x39, 0x58, 0x53, 0xc4, 0xa1, + 0x42, 0x22, 0x81, 0x82, 0x84, 0x84, 0xc4, 0x65, 0xac, 0x8c, 0xdd, 0x46, 0x1a, 0x2e, 0x5c, 0xa6, + 0x25, 0x78, 0x59, 0x19, 0xcd, 0xf3, 0x6c, 0xa7, 0xa5, 0x37, 0x3e, 0x01, 0xe2, 0x63, 0xf0, 0x51, + 0x38, 0xf6, 0xd8, 0x23, 0x4d, 0x2f, 0x1c, 0xfb, 0x11, 0x50, 0xec, 0x34, 0x64, 0xbd, 0x58, 0xfe, + 0xeb, 0xff, 0xde, 0xfb, 0xbd, 0xbf, 0x4d, 0xba, 0x97, 0x12, 0x72, 0xcd, 0xf3, 0xcf, 0x81, 0x90, + 0xa0, 0x81, 0x76, 0xb6, 0xda, 0x7b, 0x9a, 0x8d, 0xf5, 0x55, 0x91, 0x04, 0x29, 0x4c, 0xc2, 0x0c, + 0x32, 0x08, 0x4d, 0x41, 0x52, 0x5c, 0x1a, 0x65, 0x84, 0xb9, 0xd9, 0x46, 0xef, 0x65, 0xab, 0x7c, + 0xc6, 0x2f, 0xa6, 0x7c, 0x06, 0xf2, 0x5a, 0x85, 0x29, 0x4c, 0x26, 0x90, 0x87, 0x57, 0x5a, 0x8b, + 0x4c, 0x8a, 0xb4, 0xb9, 0xd4, 0x5d, 0x6f, 0x5a, 0x5d, 0x29, 0x48, 0xcd, 0xbf, 0x09, 0x09, 0x5f, + 0x78, 0xaa, 0x6b, 0x15, 0x8a, 0xeb, 0x2c, 0xbc, 0x29, 0xb8, 0x1c, 0x73, 0x19, 0x2a, 0x7d, 0xa1, + 0x95, 0x3d, 0x6d, 0xbb, 0x0f, 0xc4, 0x7d, 0x57, 0xef, 0x1b, 0xc3, 0xdb, 0xaf, 0x63, 0x9e, 0x6b, + 0xfa, 0x8a, 0x1c, 0x54, 0x90, 0x88, 0xdf, 0x14, 0x5c, 0xe9, 0x1e, 0x3e, 0xc4, 0xfd, 0x83, 0xc1, + 0xc3, 0xa0, 0x01, 0xbf, 0x8f, 0xe3, 0xb3, 0xda, 0x8c, 0xda, 0x95, 0xd4, 0x27, 0x7b, 0x7a, 0x2e, + 0x78, 0xef, 0xce, 0x21, 0xee, 0x77, 0x07, 0xdd, 0xa0, 0x79, 0x99, 0x78, 0x2e, 0x78, 0x64, 0x3c, + 0xff, 0x07, 0x26, 0xae, 0xe5, 0xc4, 0xb0, 0x25, 0xd3, 0xd7, 0xc4, 0xb1, 0x73, 0x94, 0x80, 0x5c, + 0xf1, 0x1a, 0xf9, 0x68, 0x17, 0x69, 0xdd, 0xe8, 0x56, 0x2d, 0xf5, 0x48, 0x27, 0x35, 0xf3, 0x4e, + 0x8f, 0x0d, 0xf8, 0x41, 0xd4, 0x68, 0xea, 0x93, 0x7b, 0x26, 0x6c, 0xef, 0xae, 0x19, 0xe8, 0x04, + 0x36, 0xfa, 0xa8, 0x3a, 0x23, 0x6b, 0x3d, 0x79, 0x4c, 0xf6, 0xaa, 0xf5, 0xa8, 0x4b, 0x9c, 0x8a, + 0x72, 0x1e, 0x0d, 0x3f, 0x7c, 0x1c, 0x8e, 0x62, 0x17, 0x51, 0x42, 0xf6, 0x4f, 0x86, 0xf1, 0xf9, + 0xe9, 0xb1, 0x8b, 0x07, 0x23, 0xd2, 0x69, 0xb6, 0x3d, 0x21, 0xf7, 0xcf, 0x24, 0xa4, 0x5c, 0x29, + 0xea, 0xfd, 0xcf, 0xb8, 0x1b, 0xca, 0x6b, 0x79, 0xbb, 0x4f, 0xec, 0xa3, 0x3e, 0x7e, 0x86, 0x8f, + 0x8e, 0x16, 0x2b, 0x86, 0x96, 0x2b, 0x86, 0x36, 0x2b, 0x86, 0xbf, 0x97, 0x0c, 0xff, 0x2a, 0x19, + 0xfe, 0x5d, 0x32, 0xbc, 0x28, 0x19, 0xfe, 0x53, 0x32, 0xfc, 0xb7, 0x64, 0x68, 0x53, 0x32, 0xfc, + 0x73, 0xcd, 0xd0, 0x62, 0xcd, 0xd0, 0x72, 0xcd, 0xd0, 0x27, 0x67, 0x3b, 0x76, 0xfa, 0x5c, 0x24, + 0xc9, 0xbe, 0xf9, 0xc7, 0x17, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xa2, 0x48, 0x34, 0x87, + 0x02, 0x00, 0x00, } func (x Type) String() string { @@ -248,6 +264,9 @@ func (this *ClientToFrontend) Equal(that interface{}) bool { if this.ClientID != that1.ClientID { return false } + if !this.Stats.Equal(that1.Stats) { + return false + } return true } func (this *FrontendToClient) GoString() string { @@ -255,7 +274,7 @@ func (this *FrontendToClient) GoString() string { return "nil" } s := make([]string, 0, 6) - s = append(s, "&frontend.FrontendToClient{") + s = append(s, "&frontendv1pb.FrontendToClient{") if this.HttpRequest != nil { s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") } @@ -267,12 +286,15 @@ func (this *ClientToFrontend) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) - s = append(s, "&frontend.ClientToFrontend{") + s := make([]string, 0, 7) + s = append(s, "&frontendv1pb.ClientToFrontend{") if this.HttpResponse != nil { s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") } s = append(s, "ClientID: "+fmt.Sprintf("%#v", 
this.ClientID)+",\n") + if this.Stats != nil { + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -461,6 +483,18 @@ func (m *ClientToFrontend) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Stats != nil { + { + size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if len(m.ClientID) > 0 { i -= len(m.ClientID) copy(dAtA[i:], m.ClientID) @@ -524,6 +558,10 @@ func (m *ClientToFrontend) Size() (n int) { if l > 0 { n += 1 + l + sovFrontend(uint64(l)) } + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovFrontend(uint64(l)) + } return n } @@ -551,6 +589,7 @@ func (this *ClientToFrontend) String() string { s := strings.Join([]string{`&ClientToFrontend{`, `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, + `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Stats", "stats.Stats", 1) + `,`, `}`, }, "") return s @@ -768,6 +807,42 @@ func (m *ClientToFrontend) Unmarshal(dAtA []byte) error { } m.ClientID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &stats.Stats{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipFrontend(dAtA[iNdEx:]) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.proto b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto similarity index 74% rename from vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.proto rename to vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto index 077c1db0d6208..c801993578a47 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto @@ -1,11 +1,14 @@ syntax = "proto3"; +// Protobuf package should not be changed when moving around go packages +// in order to not break backward compatibility. 
package frontend; -option go_package = "frontend"; +option go_package = "frontendv1pb"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; +import "github.com/cortexproject/cortex/pkg/querier/stats/stats.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -29,4 +32,5 @@ message FrontendToClient { message ClientToFrontend { httpgrpc.HTTPResponse httpResponse = 1; string clientID = 2; + stats.Stats stats = 3; } diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go new file mode 100644 index 0000000000000..da5942883c614 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go @@ -0,0 +1,315 @@ +package v2 + +import ( + "context" + "flag" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "go.uber.org/atomic" + + "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" + "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/grpcutil" + "github.com/cortexproject/cortex/pkg/util/services" +) + +// Config for a Frontend. +type Config struct { + SchedulerAddress string `yaml:"scheduler_address"` + DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` + WorkerConcurrency int `yaml:"scheduler_worker_concurrency"` + GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` + + // Used to find local IP address, that is sent to scheduler and querier-worker. + InfNames []string `yaml:"instance_interface_names"` + + // If set, address is not computed from interfaces. + Addr string `yaml:"address" doc:"hidden"` + Port int `doc:"hidden"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.SchedulerAddress, "frontend.scheduler-address", "", "DNS hostname used for finding query-schedulers.") + f.DurationVar(&cfg.DNSLookupPeriod, "frontend.scheduler-dns-lookup-period", 10*time.Second, "How often to resolve the scheduler-address, in order to look for new query-scheduler instances.") + f.IntVar(&cfg.WorkerConcurrency, "frontend.scheduler-worker-concurrency", 5, "Number of concurrent workers forwarding queries to single query-scheduler.") + + cfg.InfNames = []string{"eth0", "en0"} + f.Var((*flagext.StringSlice)(&cfg.InfNames), "frontend.instance-interface-names", "Name of network interface to read address from. This address is sent to query-scheduler and querier, which uses it to send the query response back to query-frontend.") + f.StringVar(&cfg.Addr, "frontend.instance-addr", "", "IP address to advertise to querier (via scheduler) (resolved via interfaces by default).") + f.IntVar(&cfg.Port, "frontend.instance-port", 0, "Port to advertise to querier (via scheduler) (defaults to server.grpc-listen-port).") + + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("frontend.grpc-client-config", f) +} + +// Frontend implements GrpcRoundTripper. 
It queues HTTP requests, +// dispatches them to backends via gRPC, and handles retries for requests which failed. +type Frontend struct { + services.Service + + cfg Config + log log.Logger + + lastQueryID atomic.Uint64 + + // frontend workers will read from this channel, and send request to scheduler. + requestsCh chan *frontendRequest + + schedulerWorkers *frontendSchedulerWorkers + requests *requestsInProgress +} + +type frontendRequest struct { + queryID uint64 + request *httpgrpc.HTTPRequest + userID string + + cancel context.CancelFunc + + enqueue chan enqueueResult + response chan *frontendv2pb.QueryResultRequest +} + +type enqueueStatus int + +const ( + // Sent to scheduler successfully, and frontend should wait for response now. + waitForResponse enqueueStatus = iota + + // Failed to forward request to scheduler, frontend will try again. + failed +) + +type enqueueResult struct { + status enqueueStatus + + cancelCh chan<- uint64 // Channel that can be used for request cancellation. If nil, cancellation is not possible. +} + +// New creates a new frontend. +func NewFrontend(cfg Config, log log.Logger, reg prometheus.Registerer) (*Frontend, error) { + requestsCh := make(chan *frontendRequest) + + schedulerWorkers, err := newFrontendSchedulerWorkers(cfg, fmt.Sprintf("%s:%d", cfg.Addr, cfg.Port), requestsCh, log) + if err != nil { + return nil, err + } + + f := &Frontend{ + cfg: cfg, + log: log, + requestsCh: requestsCh, + schedulerWorkers: schedulerWorkers, + requests: newRequestsInProgress(), + } + // Randomize to avoid getting responses from queries sent before restart, which could lead to mixing results + // between different queries. Note that frontend verifies the user, so it cannot leak results between tenants. + // This isn't perfect, but better than nothing. + f.lastQueryID.Store(rand.Uint64()) + + promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_query_frontend_queries_in_progress", + Help: "Number of queries in progress handled by this frontend.", + }, func() float64 { + return float64(f.requests.count()) + }) + + promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_query_frontend_connected_schedulers", + Help: "Number of schedulers this frontend is connected to.", + }, func() float64 { + return float64(f.schedulerWorkers.getWorkersCount()) + }) + + f.Service = services.NewIdleService(f.starting, f.stopping) + return f, nil +} + +func (f *Frontend) starting(ctx context.Context) error { + return errors.Wrap(services.StartAndAwaitRunning(ctx, f.schedulerWorkers), "failed to start frontend scheduler workers") +} + +func (f *Frontend) stopping(_ error) error { + return errors.Wrap(services.StopAndAwaitTerminated(context.Background(), f.schedulerWorkers), "failed to stop frontend scheduler workers") +} + +// RoundTripGRPC round trips a proto (instead of a HTTP request). +func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { + if s := f.State(); s != services.Running { + return nil, fmt.Errorf("frontend not running: %v", s) + } + + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + // Propagate trace context in gRPC too - this will be ignored if using HTTP. 
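+	// The carrier below writes the span context straight into req.Headers, so the
+	// scheduler and the querier that eventually executes the request can continue
+	// the same trace.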
+ tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) + if tracer != nil && span != nil { + carrier := (*grpcutil.HttpgrpcHeadersCarrier)(req) + if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil { + return nil, err + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + freq := &frontendRequest{ + queryID: f.lastQueryID.Inc(), + request: req, + userID: userID, + + cancel: cancel, + + // Buffer of 1 to ensure response or error can be written to the channel + // even if this goroutine goes away due to client context cancellation. + enqueue: make(chan enqueueResult, 1), + response: make(chan *frontendv2pb.QueryResultRequest, 1), + } + + f.requests.put(freq) + defer f.requests.delete(freq.queryID) + + retries := f.cfg.WorkerConcurrency + 1 // To make sure we hit at least two different schedulers. + +enqueueAgain: + select { + case <-ctx.Done(): + return nil, ctx.Err() + + case f.requestsCh <- freq: + // Enqueued, let's wait for response. + } + + var cancelCh chan<- uint64 + + select { + case <-ctx.Done(): + return nil, ctx.Err() + + case enqRes := <-freq.enqueue: + if enqRes.status == waitForResponse { + cancelCh = enqRes.cancelCh + break // go wait for response. + } else if enqRes.status == failed { + retries-- + if retries > 0 { + goto enqueueAgain + } + } + + return nil, httpgrpc.Errorf(http.StatusInternalServerError, "failed to enqueue request") + } + + select { + case <-ctx.Done(): + if cancelCh != nil { + select { + case cancelCh <- freq.queryID: + // cancellation sent. + default: + // failed to cancel, ignore. + } + } + return nil, ctx.Err() + + case resp := <-freq.response: + if stats.ShouldTrackHTTPGRPCResponse(resp.HttpResponse) { + stats := stats.FromContext(ctx) + stats.Merge(resp.Stats) // Safe if stats is nil. + } + + return resp.HttpResponse, nil + } +} + +func (f *Frontend) QueryResult(ctx context.Context, qrReq *frontendv2pb.QueryResultRequest) (*frontendv2pb.QueryResultResponse, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + req := f.requests.get(qrReq.QueryID) + // It is possible that some old response belonging to different user was received, if frontend has restarted. + // To avoid leaking query results between users, we verify the user here. + // To avoid mixing results from different queries, we randomize queryID counter on start. + if req != nil && req.userID == userID { + select { + case req.response <- qrReq: + // Should always be possible, unless QueryResult is called multiple times with the same queryID. + default: + level.Warn(f.log).Log("msg", "failed to write query result to the response channel", "queryID", qrReq.QueryID, "user", userID) + } + } + + return &frontendv2pb.QueryResultResponse{}, nil +} + +// CheckReady determines if the query frontend is ready. Function parameters/return +// chosen to match the same method in the ingester +func (f *Frontend) CheckReady(_ context.Context) error { + workers := f.schedulerWorkers.getWorkersCount() + + // If frontend is connected to at least one scheduler, we are ready. 
+ if workers > 0 { + return nil + } + + msg := fmt.Sprintf("not ready: number of schedulers this worker is connected to is %d", workers) + level.Info(f.log).Log("msg", msg) + return errors.New(msg) +} + +type requestsInProgress struct { + mu sync.Mutex + requests map[uint64]*frontendRequest +} + +func newRequestsInProgress() *requestsInProgress { + return &requestsInProgress{ + requests: map[uint64]*frontendRequest{}, + } +} + +func (r *requestsInProgress) count() int { + r.mu.Lock() + defer r.mu.Unlock() + + return len(r.requests) +} + +func (r *requestsInProgress) put(req *frontendRequest) { + r.mu.Lock() + defer r.mu.Unlock() + + r.requests[req.queryID] = req +} + +func (r *requestsInProgress) delete(queryID uint64) { + r.mu.Lock() + defer r.mu.Unlock() + + delete(r.requests, queryID) +} + +func (r *requestsInProgress) get(queryID uint64) *frontendRequest { + r.mu.Lock() + defer r.mu.Unlock() + + return r.requests[queryID] +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go new file mode 100644 index 0000000000000..577a0d27abf27 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go @@ -0,0 +1,327 @@ +package v2 + +import ( + "context" + "net/http" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/weaveworks/common/httpgrpc" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" + "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +type frontendSchedulerWorkers struct { + services.Service + + cfg Config + log log.Logger + frontendAddress string + + // Channel with requests that should be forwarded to the scheduler. + requestsCh <-chan *frontendRequest + + watcher services.Service + + mu sync.Mutex + // Set to nil when stop is called... no more workers are created afterwards. + workers map[string]*frontendSchedulerWorker +} + +func newFrontendSchedulerWorkers(cfg Config, frontendAddress string, requestsCh <-chan *frontendRequest, log log.Logger) (*frontendSchedulerWorkers, error) { + f := &frontendSchedulerWorkers{ + cfg: cfg, + log: log, + frontendAddress: frontendAddress, + requestsCh: requestsCh, + workers: map[string]*frontendSchedulerWorker{}, + } + + w, err := util.NewDNSWatcher(cfg.SchedulerAddress, cfg.DNSLookupPeriod, f) + if err != nil { + return nil, err + } + + f.watcher = w + f.Service = services.NewIdleService(f.starting, f.stopping) + return f, nil +} + +func (f *frontendSchedulerWorkers) starting(ctx context.Context) error { + return services.StartAndAwaitRunning(ctx, f.watcher) +} + +func (f *frontendSchedulerWorkers) stopping(_ error) error { + err := services.StopAndAwaitTerminated(context.Background(), f.watcher) + + f.mu.Lock() + defer f.mu.Unlock() + + for _, w := range f.workers { + w.stop() + } + f.workers = nil + + return err +} + +func (f *frontendSchedulerWorkers) AddressAdded(address string) { + f.mu.Lock() + ws := f.workers + w := f.workers[address] + f.mu.Unlock() + + // Already stopped or we already have worker for this address. 
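+	// The lock is deliberately released before dialing below, so a slow connection
+	// attempt does not block other watcher callbacks; f.workers is checked again
+	// under the lock before the new worker is stored, in case stopping() ran meanwhile.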
+ if ws == nil || w != nil { + return + } + + level.Info(f.log).Log("msg", "adding connection to scheduler", "addr", address) + conn, err := f.connectToScheduler(context.Background(), address) + if err != nil { + level.Error(f.log).Log("msg", "error connecting to scheduler", "addr", address, "err", err) + return + } + + // No worker for this address yet, start a new one. + w = newFrontendSchedulerWorker(conn, address, f.frontendAddress, f.requestsCh, f.cfg.WorkerConcurrency, f.log) + + f.mu.Lock() + defer f.mu.Unlock() + + // Can be nil if stopping has been called already. + if f.workers != nil { + f.workers[address] = w + w.start() + } +} + +func (f *frontendSchedulerWorkers) AddressRemoved(address string) { + level.Info(f.log).Log("msg", "removing connection to scheduler", "addr", address) + + f.mu.Lock() + // This works fine if f.workers is nil already. + w := f.workers[address] + delete(f.workers, address) + f.mu.Unlock() + + if w != nil { + w.stop() + } +} + +// Get number of workers. +func (f *frontendSchedulerWorkers) getWorkersCount() int { + f.mu.Lock() + defer f.mu.Unlock() + + return len(f.workers) +} + +func (f *frontendSchedulerWorkers) connectToScheduler(ctx context.Context, address string) (*grpc.ClientConn, error) { + // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. + opts, err := f.cfg.GRPCClientConfig.DialOption(nil, nil) + if err != nil { + return nil, err + } + + conn, err := grpc.DialContext(ctx, address, opts...) + if err != nil { + return nil, err + } + return conn, nil +} + +// Worker managing single gRPC connection to Scheduler. Each worker starts multiple goroutines for forwarding +// requests and cancellations to scheduler. +type frontendSchedulerWorker struct { + log log.Logger + + conn *grpc.ClientConn + concurrency int + schedulerAddr string + frontendAddr string + + // Context and cancellation used by individual goroutines. + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + // Shared between all frontend workers. + requestCh <-chan *frontendRequest + + // Cancellation requests for this scheduler are received via this channel. It is passed to frontend after + // query has been enqueued to scheduler. 
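+	//
+	// The channel is unbuffered and the frontend sends on it with a non-blocking
+	// select, so cancellation is best-effort: if no worker goroutine is ready to
+	// receive, the cancellation is dropped rather than blocking RoundTripGRPC.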
+ cancelCh chan uint64 +} + +func newFrontendSchedulerWorker(conn *grpc.ClientConn, schedulerAddr string, frontendAddr string, requestCh <-chan *frontendRequest, concurrency int, log log.Logger) *frontendSchedulerWorker { + w := &frontendSchedulerWorker{ + log: log, + conn: conn, + concurrency: concurrency, + schedulerAddr: schedulerAddr, + frontendAddr: frontendAddr, + requestCh: requestCh, + cancelCh: make(chan uint64), + } + w.ctx, w.cancel = context.WithCancel(context.Background()) + + return w +} + +func (w *frontendSchedulerWorker) start() { + client := schedulerpb.NewSchedulerForFrontendClient(w.conn) + for i := 0; i < w.concurrency; i++ { + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.runOne(w.ctx, client) + }() + } +} + +func (w *frontendSchedulerWorker) stop() { + w.cancel() + w.wg.Wait() + if err := w.conn.Close(); err != nil { + level.Error(w.log).Log("msg", "error while closing connection to scheduler", "err", err) + } +} + +func (w *frontendSchedulerWorker) runOne(ctx context.Context, client schedulerpb.SchedulerForFrontendClient) { + backoffConfig := util.BackoffConfig{ + MinBackoff: 50 * time.Millisecond, + MaxBackoff: 1 * time.Second, + } + + backoff := util.NewBackoff(ctx, backoffConfig) + for backoff.Ongoing() { + loop, loopErr := client.FrontendLoop(ctx) + if loopErr != nil { + level.Error(w.log).Log("msg", "error contacting scheduler", "err", loopErr, "addr", w.schedulerAddr) + backoff.Wait() + continue + } + + loopErr = w.schedulerLoop(loop) + if closeErr := loop.CloseSend(); closeErr != nil { + level.Debug(w.log).Log("msg", "failed to close frontend loop", "err", loopErr, "addr", w.schedulerAddr) + } + + if loopErr != nil { + level.Error(w.log).Log("msg", "error sending requests to scheduler", "err", loopErr, "addr", w.schedulerAddr) + backoff.Wait() + continue + } + + backoff.Reset() + } +} + +func (w *frontendSchedulerWorker) schedulerLoop(loop schedulerpb.SchedulerForFrontend_FrontendLoopClient) error { + if err := loop.Send(&schedulerpb.FrontendToScheduler{ + Type: schedulerpb.INIT, + FrontendAddress: w.frontendAddr, + }); err != nil { + return err + } + + if resp, err := loop.Recv(); err != nil || resp.Status != schedulerpb.OK { + if err != nil { + return err + } + return errors.Errorf("unexpected status received for init: %v", resp.Status) + } + + ctx := loop.Context() + + for { + select { + case <-ctx.Done(): + // No need to report error if our internal context is canceled. This can happen during shutdown, + // or when scheduler is no longer resolvable. (It would be nice if this context reported "done" also when + // connection scheduler stops the call, but that doesn't seem to be the case). + // + // Reporting error here would delay reopening the stream (if the worker context is not done yet). + level.Debug(w.log).Log("msg", "stream context finished", "err", ctx.Err()) + return nil + + case req := <-w.requestCh: + err := loop.Send(&schedulerpb.FrontendToScheduler{ + Type: schedulerpb.ENQUEUE, + QueryID: req.queryID, + UserID: req.userID, + HttpRequest: req.request, + FrontendAddress: w.frontendAddr, + }) + + if err != nil { + req.enqueue <- enqueueResult{status: failed} + return err + } + + resp, err := loop.Recv() + if err != nil { + req.enqueue <- enqueueResult{status: failed} + return err + } + + switch resp.Status { + case schedulerpb.OK: + req.enqueue <- enqueueResult{status: waitForResponse, cancelCh: w.cancelCh} + // Response will come from querier. 
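+				// The querier delivers that response via the FrontendForQuerier.QueryResult
+				// RPC; Frontend.QueryResult routes it onto req.response, where the blocked
+				// RoundTripGRPC picks it up and returns it to the caller.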
+ + case schedulerpb.SHUTTING_DOWN: + // Scheduler is shutting down, report failure to enqueue and stop this loop. + req.enqueue <- enqueueResult{status: failed} + return errors.New("scheduler is shutting down") + + case schedulerpb.ERROR: + req.enqueue <- enqueueResult{status: waitForResponse} + req.response <- &frontendv2pb.QueryResultRequest{ + HttpResponse: &httpgrpc.HTTPResponse{ + Code: http.StatusInternalServerError, + Body: []byte(err.Error()), + }, + } + + case schedulerpb.TOO_MANY_REQUESTS_PER_TENANT: + req.enqueue <- enqueueResult{status: waitForResponse} + req.response <- &frontendv2pb.QueryResultRequest{ + HttpResponse: &httpgrpc.HTTPResponse{ + Code: http.StatusTooManyRequests, + Body: []byte("too many outstanding requests"), + }, + } + } + + case reqID := <-w.cancelCh: + err := loop.Send(&schedulerpb.FrontendToScheduler{ + Type: schedulerpb.CANCEL, + QueryID: reqID, + }) + + if err != nil { + return err + } + + resp, err := loop.Recv() + if err != nil { + return err + } + + // Scheduler may be shutting down, report that. + if resp.Status != schedulerpb.OK { + return errors.Errorf("unexpected status received for cancellation: %v", resp.Status) + } + } + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go new file mode 100644 index 0000000000000..daeb50a8c3b49 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.pb.go @@ -0,0 +1,782 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: frontend.proto + +package frontendv2pb + +import ( + context "context" + fmt "fmt" + stats "github.com/cortexproject/cortex/pkg/querier/stats" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + httpgrpc "github.com/weaveworks/common/httpgrpc" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryResultRequest struct { + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,2,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` + Stats *stats.Stats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` +} + +func (m *QueryResultRequest) Reset() { *m = QueryResultRequest{} } +func (*QueryResultRequest) ProtoMessage() {} +func (*QueryResultRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{0} +} +func (m *QueryResultRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResultRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResultRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResultRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResultRequest.Merge(m, src) +} +func (m *QueryResultRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryResultRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResultRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResultRequest proto.InternalMessageInfo + +func (m *QueryResultRequest) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *QueryResultRequest) GetHttpResponse() *httpgrpc.HTTPResponse { + if m != nil { + return m.HttpResponse + } + return nil +} + +func (m *QueryResultRequest) GetStats() *stats.Stats { + if m != nil { + return m.Stats + } + return nil +} + +type QueryResultResponse struct { +} + +func (m *QueryResultResponse) Reset() { *m = QueryResultResponse{} } +func (*QueryResultResponse) ProtoMessage() {} +func (*QueryResultResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eca3873955a29cfe, []int{1} +} +func (m *QueryResultResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResultResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResultResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResultResponse.Merge(m, src) +} +func (m *QueryResultResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryResultResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResultResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResultResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*QueryResultRequest)(nil), "frontendv2pb.QueryResultRequest") + proto.RegisterType((*QueryResultResponse)(nil), "frontendv2pb.QueryResultResponse") +} + +func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } + +var fileDescriptor_eca3873955a29cfe = []byte{ + // 351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcd, 0x4e, 0x3a, 0x31, + 0x14, 0xc5, 0xdb, 0xff, 0xdf, 0x8f, 0xa4, 0x10, 0x17, 0x35, 0x9a, 0x09, 0x8b, 0x06, 0x67, 0xc5, + 0xc6, 0x69, 0x82, 0xae, 0x4c, 0xdc, 0x10, 0x43, 0x74, 0x27, 0x23, 0x2b, 0x77, 0xcc, 0x58, 0x86, + 0x0f, 0x99, 0x96, 0xb6, 0x03, 0xb2, 0xf3, 0x09, 0x8c, 0x8f, 0xe1, 
0xa3, 0xb8, 0x64, 0xc9, 0x52, + 0xca, 0xc6, 0x25, 0x8f, 0x60, 0x68, 0x81, 0x0c, 0x31, 0x71, 0xd3, 0xdc, 0x93, 0x7b, 0x7e, 0xb9, + 0xe7, 0xde, 0xa2, 0xa3, 0xb6, 0xe4, 0xa9, 0x66, 0xe9, 0x53, 0x20, 0x24, 0xd7, 0x1c, 0x17, 0x37, + 0x7a, 0x54, 0x15, 0x51, 0xe9, 0x3c, 0xe9, 0xea, 0x4e, 0x16, 0x05, 0x31, 0x1f, 0xd0, 0x84, 0x27, + 0x9c, 0x5a, 0x53, 0x94, 0xb5, 0xad, 0xb2, 0xc2, 0x56, 0x0e, 0x2e, 0x5d, 0xe6, 0xec, 0x63, 0xd6, + 0x1a, 0xb1, 0x31, 0x97, 0x7d, 0x45, 0x63, 0x3e, 0x18, 0xf0, 0x94, 0x76, 0xb4, 0x16, 0x89, 0x14, + 0xf1, 0xb6, 0x58, 0x53, 0xd7, 0x39, 0x2a, 0xe6, 0x52, 0xb3, 0x17, 0x21, 0x79, 0x8f, 0xc5, 0x7a, + 0xad, 0xa8, 0xe8, 0x27, 0x74, 0x98, 0x31, 0xd9, 0x65, 0x92, 0x2a, 0xdd, 0xd2, 0xca, 0xbd, 0x0e, + 0xf7, 0xdf, 0x20, 0xc2, 0x8d, 0x8c, 0xc9, 0x49, 0xc8, 0x54, 0xf6, 0xac, 0x43, 0x36, 0xcc, 0x98, + 0xd2, 0xd8, 0x43, 0x87, 0x2b, 0x66, 0x72, 0x77, 0xe3, 0xc1, 0x32, 0xac, 0xec, 0x85, 0x1b, 0x89, + 0xaf, 0x50, 0x71, 0x95, 0x20, 0x64, 0x4a, 0xf0, 0x54, 0x31, 0xef, 0x5f, 0x19, 0x56, 0x0a, 0xd5, + 0xd3, 0x60, 0x1b, 0xeb, 0xb6, 0xd9, 0xbc, 0xdf, 0x74, 0xc3, 0x1d, 0x2f, 0xf6, 0xd1, 0xbe, 0x9d, + 0xed, 0xfd, 0xb7, 0x50, 0x31, 0x70, 0x49, 0x1e, 0x56, 0x6f, 0xe8, 0x5a, 0xfe, 0x09, 0x3a, 0xde, + 0xc9, 0xe3, 0xd0, 0x6a, 0x0f, 0xe1, 0xfa, 0xfa, 0xb6, 0x75, 0x2e, 0x1b, 0x6e, 0x1f, 0xdc, 0x44, + 0x85, 0x9c, 0x19, 0x97, 0x83, 0xfc, 0xfd, 0x83, 0xdf, 0x7b, 0x95, 0xce, 0xfe, 0x70, 0xb8, 0x49, + 0x3e, 0xa8, 0xd5, 0xa6, 0x73, 0x02, 0x66, 0x73, 0x02, 0x96, 0x73, 0x02, 0x5f, 0x0d, 0x81, 0x1f, + 0x86, 0xc0, 0x4f, 0x43, 0xe0, 0xd4, 0x10, 0xf8, 0x65, 0x08, 0xfc, 0x36, 0x04, 0x2c, 0x0d, 0x81, + 0xef, 0x0b, 0x02, 0xa6, 0x0b, 0x02, 0x66, 0x0b, 0x02, 0x1e, 0x77, 0xfe, 0x3e, 0x3a, 0xb0, 0xe7, + 0xbd, 0xf8, 0x09, 0x00, 0x00, 0xff, 0xff, 0x02, 0xb0, 0x28, 0xb5, 0x22, 0x02, 0x00, 0x00, +} + +func (this *QueryResultRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResultRequest) + if !ok { + that2, ok := that.(QueryResultRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if !this.HttpResponse.Equal(that1.HttpResponse) { + return false + } + if !this.Stats.Equal(that1.Stats) { + return false + } + return true +} +func (this *QueryResultResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResultResponse) + if !ok { + that2, ok := that.(QueryResultResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *QueryResultRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&frontendv2pb.QueryResultRequest{") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + if this.HttpResponse != nil { + s = append(s, "HttpResponse: "+fmt.Sprintf("%#v", this.HttpResponse)+",\n") + } + if this.Stats != nil { + s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryResultResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&frontendv2pb.QueryResultResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFrontend(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { 
+ return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// FrontendForQuerierClient is the client API for FrontendForQuerier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type FrontendForQuerierClient interface { + QueryResult(ctx context.Context, in *QueryResultRequest, opts ...grpc.CallOption) (*QueryResultResponse, error) +} + +type frontendForQuerierClient struct { + cc *grpc.ClientConn +} + +func NewFrontendForQuerierClient(cc *grpc.ClientConn) FrontendForQuerierClient { + return &frontendForQuerierClient{cc} +} + +func (c *frontendForQuerierClient) QueryResult(ctx context.Context, in *QueryResultRequest, opts ...grpc.CallOption) (*QueryResultResponse, error) { + out := new(QueryResultResponse) + err := c.cc.Invoke(ctx, "/frontendv2pb.FrontendForQuerier/QueryResult", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FrontendForQuerierServer is the server API for FrontendForQuerier service. +type FrontendForQuerierServer interface { + QueryResult(context.Context, *QueryResultRequest) (*QueryResultResponse, error) +} + +// UnimplementedFrontendForQuerierServer can be embedded to have forward compatible implementations. +type UnimplementedFrontendForQuerierServer struct { +} + +func (*UnimplementedFrontendForQuerierServer) QueryResult(ctx context.Context, req *QueryResultRequest) (*QueryResultResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryResult not implemented") +} + +func RegisterFrontendForQuerierServer(s *grpc.Server, srv FrontendForQuerierServer) { + s.RegisterService(&_FrontendForQuerier_serviceDesc, srv) +} + +func _FrontendForQuerier_QueryResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryResultRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FrontendForQuerierServer).QueryResult(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/frontendv2pb.FrontendForQuerier/QueryResult", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FrontendForQuerierServer).QueryResult(ctx, req.(*QueryResultRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _FrontendForQuerier_serviceDesc = grpc.ServiceDesc{ + ServiceName: "frontendv2pb.FrontendForQuerier", + HandlerType: (*FrontendForQuerierServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "QueryResult", + Handler: _FrontendForQuerier_QueryResult_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "frontend.proto", +} + +func (m *QueryResultRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResultRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResultRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Stats != nil { + { + size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HttpResponse != nil { + { + size, err := m.HttpResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFrontend(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.QueryID != 0 { + i = encodeVarintFrontend(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResultResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResultResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintFrontend(dAtA []byte, offset int, v uint64) int { + offset -= sovFrontend(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryResultRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryID != 0 { + n += 1 + sovFrontend(uint64(m.QueryID)) + } + if m.HttpResponse != nil { + l = m.HttpResponse.Size() + n += 1 + l + sovFrontend(uint64(l)) + } + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovFrontend(uint64(l)) + } + return n +} + +func (m *QueryResultResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovFrontend(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFrontend(x uint64) (n int) { + return sovFrontend(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *QueryResultRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryResultRequest{`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `HttpResponse:` + strings.Replace(fmt.Sprintf("%v", this.HttpResponse), "HTTPResponse", "httpgrpc.HTTPResponse", 1) + `,`, + `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Stats", "stats.Stats", 1) + `,`, + `}`, + }, "") + return s +} +func (this *QueryResultResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueryResultResponse{`, + `}`, + }, "") + return s +} +func valueToStringFrontend(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *QueryResultRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResultRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResultRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpResponse == nil { + m.HttpResponse = &httpgrpc.HTTPResponse{} + } + if err := m.HttpResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFrontend + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFrontend + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &stats.Stats{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResultResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResultResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResultResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipFrontend(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFrontend + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFrontend(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFrontend + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthFrontend + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFrontend + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFrontend(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthFrontend + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFrontend = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFrontend = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto new file mode 100644 index 0000000000000..b93106d7873c3 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb/frontend.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package frontendv2pb; + +option go_package = "frontendv2pb"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; +import "github.com/cortexproject/cortex/pkg/querier/stats/stats.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Frontend interface exposed to Queriers. Used by queriers to report back the result of the query. +service FrontendForQuerier { + rpc QueryResult (QueryResultRequest) returns (QueryResultResponse) { }; +} + +message QueryResultRequest { + uint64 queryID = 1; + httpgrpc.HTTPResponse httpResponse = 2; + stats.Stats stats = 3; + + // There is no userID field here, because Querier puts userID into the context when + // calling QueryResult, and that is where Frontend expects to find it. 
+} + +message QueryResultResponse { } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go index bcc94a7c60bc5..ca6b265ff7e8c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go @@ -447,7 +447,9 @@ func (m *QueryStreamResponse) GetTimeseries() []TimeSeries { } type LabelValuesRequest struct { - LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` + LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` + StartTimestampMs int64 `protobuf:"varint,2,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,3,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } @@ -489,6 +491,20 @@ func (m *LabelValuesRequest) GetLabelName() string { return "" } +func (m *LabelValuesRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *LabelValuesRequest) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + type LabelValuesResponse struct { LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` } @@ -533,6 +549,8 @@ func (m *LabelValuesResponse) GetLabelValues() []string { } type LabelNamesRequest struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } @@ -567,6 +585,20 @@ func (m *LabelNamesRequest) XXX_DiscardUnknown() { var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo +func (m *LabelNamesRequest) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *LabelNamesRequest) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + type LabelNamesResponse struct { LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` } @@ -1339,10 +1371,10 @@ func (m *LabelMatchers) GetMatchers() []*LabelMatcher { } type MetricMetadata struct { - Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MetricMetadata_MetricType" json:"type,omitempty"` - MetricName string `protobuf:"bytes,2,opt,name=metric_name,json=metricName,proto3" json:"metric_name,omitempty"` - Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` - Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MetricMetadata_MetricType" json:"type,omitempty"` + MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` + Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` } func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } @@ -1384,9 
+1416,9 @@ func (m *MetricMetadata) GetType() MetricMetadata_MetricType { return UNKNOWN } -func (m *MetricMetadata) GetMetricName() string { +func (m *MetricMetadata) GetMetricFamilyName() string { if m != nil { - return m.MetricName + return m.MetricFamilyName } return "" } @@ -1606,98 +1638,100 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1456 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xbb, 0x6f, 0x1b, 0x47, - 0x13, 0xbf, 0x15, 0x1f, 0x12, 0x87, 0x14, 0x75, 0x5a, 0xc9, 0x36, 0x4d, 0xe3, 0x3b, 0xda, 0x0b, - 0xd8, 0x9f, 0xf0, 0x7d, 0xb1, 0xec, 0xc8, 0x70, 0xa2, 0x22, 0x81, 0x41, 0xd9, 0x94, 0xcc, 0x44, - 0xa4, 0xe4, 0x25, 0x15, 0x27, 0x01, 0x02, 0xe2, 0x44, 0xae, 0xa4, 0x43, 0xee, 0x8e, 0xf4, 0x3d, - 0x82, 0xa8, 0x08, 0x10, 0x20, 0x65, 0x8a, 0xb8, 0xcc, 0x9f, 0x90, 0x3a, 0x4d, 0xfa, 0x54, 0x2e, - 0x5d, 0x1a, 0x29, 0x8c, 0x58, 0x6e, 0xd2, 0xc5, 0xc8, 0x5f, 0x10, 0xec, 0xe3, 0x8e, 0x77, 0x34, - 0x95, 0x38, 0x0f, 0x77, 0xb7, 0x33, 0xbf, 0xf9, 0xed, 0xec, 0xec, 0xcc, 0xec, 0x1c, 0x94, 0xfa, - 0x43, 0x2f, 0x60, 0x9f, 0xaf, 0x8e, 0xbc, 0x61, 0x30, 0xc4, 0x79, 0xb9, 0xaa, 0x5e, 0x3d, 0xb4, - 0x82, 0xa3, 0x70, 0x7f, 0xb5, 0x3f, 0x74, 0xae, 0x1d, 0x0e, 0x0f, 0x87, 0xd7, 0x84, 0x7a, 0x3f, - 0x3c, 0x10, 0x2b, 0xb1, 0x10, 0x5f, 0xd2, 0x8c, 0xfc, 0x86, 0xa0, 0x74, 0xdf, 0xb3, 0x02, 0x46, - 0xd9, 0x83, 0x90, 0xf9, 0x01, 0x6e, 0x03, 0x04, 0x96, 0xc3, 0x7c, 0xe6, 0x59, 0xcc, 0xaf, 0xa0, - 0x8b, 0x99, 0x95, 0xe2, 0x1a, 0x5e, 0x55, 0x5b, 0x75, 0x2d, 0x87, 0x75, 0x84, 0x66, 0xa3, 0xfa, - 0xe8, 0x69, 0x4d, 0xfb, 0xe9, 0x69, 0x0d, 0xef, 0x7a, 0xcc, 0xb4, 0xed, 0x61, 0xbf, 0x1b, 0x5b, - 0xd1, 0x04, 0x03, 0x7e, 0x1b, 0xf2, 0x9d, 0x61, 0xe8, 0xf5, 0x59, 0x65, 0xe6, 0x22, 0x5a, 0x29, - 0xaf, 0xd5, 0x22, 0xae, 0xe4, 0xae, 0xab, 0x12, 0xd2, 0x70, 0x43, 0x87, 0x2a, 0x38, 0x5e, 0x87, - 0x39, 0x87, 0x05, 0xe6, 0xc0, 0x0c, 0xcc, 0x4a, 0x46, 0xb8, 0x71, 0x36, 0x32, 0x6d, 0xb1, 0xc0, - 0xb3, 0xfa, 0x2d, 0xa5, 0xdd, 0xc8, 0x3e, 0x7a, 0x5a, 0x43, 0x34, 0x46, 0x93, 0x1a, 0xc0, 0x98, - 0x0f, 0xcf, 0x42, 0xa6, 0xbe, 0xdb, 0xd4, 0x35, 0x3c, 0x07, 0x59, 0xba, 0xb7, 0xdd, 0xd0, 0x11, - 0x59, 0x80, 0x79, 0xb5, 0xbb, 0x3f, 0x1a, 0xba, 0x3e, 0x23, 0xef, 0x42, 0x91, 0x32, 0x73, 0x10, - 0xc5, 0x60, 0x15, 0x66, 0x1f, 0x84, 0xc9, 0x00, 0x2c, 0x47, 0x3b, 0xdf, 0x0b, 0x99, 0x77, 0xac, - 0x60, 0x34, 0x02, 0x91, 0x5b, 0x50, 0x92, 0xe6, 0x92, 0x0e, 0x5f, 0x83, 0x59, 0x8f, 0xf9, 0xa1, - 0x1d, 0x44, 0xf6, 0x67, 0x26, 0xec, 0x25, 0x8e, 0x46, 0x28, 0xf2, 0x2d, 0x82, 0x52, 0x92, 0x1a, - 0xbf, 0x01, 0xd8, 0x0f, 0x4c, 0x2f, 0xe8, 0x89, 0x48, 0x06, 0xa6, 0x33, 0xea, 0x39, 0x9c, 0x0c, - 0xad, 0x64, 0xa8, 0x2e, 0x34, 0xdd, 0x48, 0xd1, 0xf2, 0xf1, 0x0a, 0xe8, 0xcc, 0x1d, 0xa4, 0xb1, - 0x33, 0x02, 0x5b, 0x66, 0xee, 0x20, 0x89, 0xbc, 0x0e, 0x73, 0x8e, 0x19, 0xf4, 0x8f, 0x98, 0xe7, - 0xab, 0xa0, 0xc6, 0x47, 0xdb, 0x36, 0xf7, 0x99, 0xdd, 0x92, 0x4a, 0x1a, 0xa3, 0x48, 0x13, 0xe6, - 0x53, 0x4e, 0xe3, 0xf5, 0x57, 0x4c, 0x10, 0x7e, 0x2b, 0x5a, 0x32, 0x15, 0xc8, 0x43, 0x04, 0x4b, - 0x82, 0xab, 0x13, 0x78, 0xcc, 0x74, 0x62, 0xc6, 0x5b, 0x50, 0xec, 0x1f, 0x85, 0xee, 0xa7, 0x29, - 0xca, 0x73, 0x2f, 0x53, 0xde, 0xe6, 0x20, 0xc5, 0x9b, 0xb4, 0x98, 0x70, 0x69, 0xe6, 0x2f, 0xb8, - 0x74, 0x03, 0xb0, 0x38, 0xf7, 0x07, 0xa6, 0x1d, 0x32, 0x3f, 0x8a, 0xfe, 0x7f, 0x00, 0x6c, 0x2e, - 0xed, 0xb9, 0xa6, 0xc3, 0x44, 0xd4, 0x0b, 0xb4, 0x20, 0x24, 0x6d, 0xd3, 0x61, 0x64, 0x1d, 0x96, - 0x52, 0x46, 0xea, 
0x18, 0x97, 0xa0, 0x24, 0xad, 0x3e, 0x13, 0x72, 0x71, 0x8e, 0x02, 0x2d, 0xda, - 0x63, 0x28, 0x59, 0x82, 0xc5, 0xed, 0x88, 0x26, 0xda, 0x8d, 0xdc, 0x54, 0x3e, 0x28, 0xa1, 0x62, - 0xab, 0x41, 0x71, 0xec, 0x43, 0x44, 0x06, 0xb1, 0x13, 0x3e, 0xc1, 0xa0, 0xef, 0xf9, 0xcc, 0xeb, - 0x04, 0x66, 0x10, 0x53, 0xfd, 0x80, 0x60, 0x31, 0x21, 0x54, 0x54, 0x97, 0xa1, 0x6c, 0xb9, 0x87, - 0xcc, 0x0f, 0xac, 0xa1, 0xdb, 0xf3, 0xcc, 0x40, 0x1e, 0x09, 0xd1, 0xf9, 0x58, 0x4a, 0xcd, 0x80, - 0xf1, 0x53, 0xbb, 0xa1, 0xd3, 0x8b, 0xa3, 0x88, 0x56, 0xb2, 0xb4, 0xe0, 0x86, 0x8e, 0x0c, 0x1e, - 0x4f, 0x49, 0x73, 0x64, 0xf5, 0x26, 0x98, 0x32, 0x82, 0x49, 0x37, 0x47, 0x56, 0x33, 0x45, 0xb6, - 0x0a, 0x4b, 0x5e, 0x68, 0xb3, 0x49, 0x78, 0x56, 0xc0, 0x17, 0xb9, 0x2a, 0x85, 0x27, 0x9f, 0xc0, - 0x12, 0x77, 0xbc, 0x79, 0x27, 0xed, 0xfa, 0x39, 0x98, 0x0d, 0x7d, 0xe6, 0xf5, 0xac, 0x81, 0xba, - 0x86, 0x3c, 0x5f, 0x36, 0x07, 0xf8, 0x2a, 0x64, 0x45, 0x67, 0xe0, 0x6e, 0x16, 0xd7, 0xce, 0x47, - 0x97, 0xfd, 0xd2, 0xe1, 0xa9, 0x80, 0x91, 0x2d, 0xc0, 0x5c, 0xe5, 0xa7, 0xd9, 0xdf, 0x84, 0x9c, - 0xcf, 0x05, 0x2a, 0xe5, 0x2e, 0x24, 0x59, 0x26, 0x3c, 0xa1, 0x12, 0x49, 0xbe, 0x47, 0x60, 0xc8, - 0xf6, 0xe3, 0x6f, 0x0e, 0xbd, 0x64, 0xcd, 0xf8, 0xaf, 0xbb, 0x76, 0xd7, 0xa1, 0x14, 0x55, 0x65, - 0xcf, 0x67, 0x81, 0xaa, 0xdf, 0x33, 0xd3, 0xea, 0xd7, 0xa7, 0xc5, 0x08, 0xda, 0x61, 0x01, 0x69, - 0x42, 0xed, 0x54, 0x9f, 0x55, 0x28, 0xae, 0x40, 0xde, 0x11, 0x10, 0x15, 0x8b, 0x72, 0xba, 0xd7, - 0x52, 0xa5, 0x25, 0x15, 0x38, 0xab, 0xa8, 0xa2, 0xf6, 0x1b, 0xe5, 0x5e, 0x0b, 0xce, 0xbd, 0xa4, - 0x51, 0xe4, 0x6b, 0x89, 0x56, 0x8e, 0xfe, 0xa8, 0x95, 0x27, 0x9a, 0xf8, 0x8f, 0x08, 0x16, 0x26, - 0x4a, 0x9f, 0xc7, 0xea, 0xc0, 0x1b, 0x3a, 0x2a, 0xa9, 0x92, 0x69, 0x51, 0xe6, 0xf2, 0xa6, 0x12, - 0x37, 0x07, 0xc9, 0xbc, 0x99, 0x49, 0xe5, 0xcd, 0x2d, 0xc8, 0x8b, 0x1a, 0x8a, 0xda, 0xdf, 0x62, - 0x2a, 0x7c, 0xbb, 0xa6, 0xe5, 0x6d, 0x2c, 0xab, 0x97, 0xad, 0x24, 0x44, 0xf5, 0x81, 0x39, 0x0a, - 0x98, 0x47, 0x95, 0x19, 0xfe, 0x3f, 0xe4, 0x65, 0xeb, 0xa9, 0x64, 0x05, 0xc1, 0x7c, 0x44, 0x90, - 0xec, 0x4e, 0x0a, 0x42, 0xbe, 0x41, 0x90, 0x93, 0xae, 0xbf, 0xae, 0xa4, 0xa8, 0xc2, 0x1c, 0x73, - 0xfb, 0xc3, 0x81, 0xe5, 0x1e, 0x8a, 0x5a, 0xcc, 0xd1, 0x78, 0x8d, 0xb1, 0xaa, 0x11, 0x5e, 0x74, - 0x25, 0x55, 0x08, 0x15, 0x38, 0xdb, 0xf5, 0x4c, 0xd7, 0x3f, 0x60, 0x9e, 0x70, 0x2c, 0xce, 0x00, - 0xf2, 0x05, 0xc0, 0x38, 0xde, 0x89, 0x38, 0xa1, 0xbf, 0x17, 0xa7, 0x55, 0x98, 0xf5, 0x4d, 0x67, - 0x64, 0xc7, 0x0d, 0x39, 0xce, 0xa8, 0x8e, 0x10, 0xab, 0x48, 0x45, 0x20, 0x72, 0x13, 0x0a, 0x31, - 0x35, 0xf7, 0x3c, 0x6e, 0xbd, 0x25, 0x2a, 0xbe, 0xf1, 0x32, 0xe4, 0x44, 0x63, 0x15, 0x81, 0x28, - 0x51, 0xb9, 0x20, 0x75, 0xc8, 0x4b, 0xbe, 0xb1, 0x5e, 0x36, 0x37, 0xb9, 0xe0, 0x4d, 0x79, 0x4a, - 0x14, 0x8b, 0xc1, 0x38, 0x84, 0xa4, 0x0e, 0xf3, 0xa9, 0x9a, 0x48, 0x3d, 0x92, 0xe8, 0x95, 0x1e, - 0xc9, 0xaf, 0x67, 0xa0, 0x9c, 0xce, 0x64, 0x7c, 0x13, 0xb2, 0xc1, 0xf1, 0x48, 0x7a, 0x53, 0x5e, - 0xbb, 0x34, 0x3d, 0xdf, 0xd5, 0xb2, 0x7b, 0x3c, 0x62, 0x54, 0xc0, 0x79, 0xdb, 0x97, 0x95, 0x26, - 0xdf, 0x1e, 0x99, 0xbc, 0x20, 0x45, 0xbc, 0xef, 0xf3, 0xd0, 0x1c, 0x31, 0x7b, 0x24, 0x2e, 0xb5, - 0x40, 0xc5, 0x37, 0x97, 0x85, 0xae, 0x15, 0x54, 0x72, 0x52, 0xc6, 0xbf, 0xc9, 0x31, 0xc0, 0x98, - 0x1c, 0x17, 0x61, 0x76, 0xaf, 0xfd, 0x7e, 0x7b, 0xe7, 0x7e, 0x5b, 0xd7, 0xf8, 0xe2, 0xf6, 0xce, - 0x5e, 0xbb, 0xdb, 0xa0, 0x3a, 0xc2, 0x05, 0xc8, 0x6d, 0xd5, 0xf7, 0xb6, 0x1a, 0xfa, 0x0c, 0x9e, - 0x87, 0xc2, 0xdd, 0x66, 0xa7, 0xbb, 0xb3, 0x45, 0xeb, 0x2d, 0x3d, 0x83, 0x31, 0x94, 0x85, 0x66, - 0x2c, 0xcb, 0x72, 0xd3, 0xce, 0x5e, 0xab, 
0x55, 0xa7, 0x1f, 0xe9, 0x39, 0x3e, 0x50, 0x35, 0xdb, - 0x9b, 0x3b, 0x7a, 0x1e, 0x97, 0x60, 0xae, 0xd3, 0xad, 0x77, 0x1b, 0x9d, 0x46, 0x57, 0x9f, 0x25, - 0x4d, 0xc8, 0xcb, 0xad, 0xff, 0x71, 0x16, 0x91, 0x1e, 0x94, 0x92, 0x21, 0xc7, 0x97, 0x53, 0x51, - 0x8d, 0xe9, 0x84, 0x3a, 0x11, 0xc5, 0x28, 0x7f, 0x64, 0xf8, 0x26, 0xf2, 0x27, 0x23, 0x84, 0x2a, - 0x7f, 0xbe, 0x42, 0x50, 0x1e, 0xa7, 0xfd, 0xa6, 0x65, 0xb3, 0x7f, 0xa3, 0xcb, 0x54, 0x61, 0xee, - 0xc0, 0xb2, 0x99, 0xf0, 0x41, 0x6e, 0x17, 0xaf, 0xa7, 0x55, 0xe5, 0xff, 0xde, 0x83, 0x42, 0x7c, - 0x04, 0x7e, 0x23, 0x8d, 0x7b, 0x7b, 0xf5, 0x6d, 0x5d, 0xe3, 0x37, 0xd2, 0xde, 0xe9, 0xf6, 0xe4, - 0x12, 0xe1, 0x05, 0x28, 0xd2, 0xc6, 0x56, 0xe3, 0xc3, 0x5e, 0xab, 0xde, 0xbd, 0x7d, 0x57, 0x9f, - 0xe1, 0x57, 0x24, 0x05, 0xed, 0x1d, 0x25, 0xcb, 0xac, 0xfd, 0x9a, 0x83, 0xb9, 0xc8, 0x47, 0x9e, - 0x85, 0xbb, 0xa1, 0x7f, 0x84, 0x97, 0xa7, 0x4d, 0xdd, 0xd5, 0x33, 0x13, 0x52, 0xd5, 0x09, 0x34, - 0xfc, 0x16, 0xe4, 0xc4, 0xa0, 0x86, 0xa7, 0x0e, 0xbe, 0xd5, 0xe9, 0xe3, 0x2c, 0xd1, 0xf0, 0x1d, - 0x28, 0x26, 0x06, 0xbc, 0x53, 0xac, 0x2f, 0xa4, 0xa4, 0xe9, 0x59, 0x90, 0x68, 0xd7, 0x11, 0xbe, - 0x0b, 0xc5, 0xc4, 0x7c, 0x85, 0xab, 0xa9, 0xa4, 0x49, 0x4d, 0x6a, 0x63, 0xae, 0x29, 0x03, 0x19, - 0xd1, 0x70, 0x03, 0x60, 0x3c, 0x5a, 0xe1, 0xf3, 0x29, 0x70, 0x72, 0x06, 0xab, 0x56, 0xa7, 0xa9, - 0x62, 0x9a, 0x0d, 0x28, 0xc4, 0x83, 0x05, 0xae, 0x4c, 0x99, 0x35, 0x24, 0xc9, 0xe9, 0x53, 0x08, - 0xd1, 0xf0, 0x26, 0x94, 0xea, 0xb6, 0xfd, 0x2a, 0x34, 0xd5, 0xa4, 0xc6, 0x9f, 0xe4, 0xb1, 0xe3, - 0x67, 0x76, 0xf2, 0x2d, 0xc7, 0x57, 0xd2, 0x4d, 0xe6, 0xb4, 0x01, 0xa5, 0xfa, 0xdf, 0x3f, 0xc5, - 0xc5, 0xbb, 0x75, 0x61, 0x61, 0xe2, 0x51, 0xc7, 0xc6, 0x84, 0xf5, 0xc4, 0x1c, 0x50, 0xad, 0x9d, - 0xaa, 0x8f, 0x59, 0x5b, 0x50, 0x4e, 0x3f, 0x42, 0xf8, 0xb4, 0x69, 0xbf, 0x1a, 0xef, 0x76, 0xca, - 0xab, 0xa5, 0xad, 0xa0, 0x8d, 0x77, 0x1e, 0x3f, 0x33, 0xb4, 0x27, 0xcf, 0x0c, 0xed, 0xc5, 0x33, - 0x03, 0x7d, 0x79, 0x62, 0xa0, 0xef, 0x4e, 0x0c, 0xf4, 0xe8, 0xc4, 0x40, 0x8f, 0x4f, 0x0c, 0xf4, - 0xf3, 0x89, 0x81, 0x7e, 0x39, 0x31, 0xb4, 0x17, 0x27, 0x06, 0x7a, 0xf8, 0xdc, 0xd0, 0x1e, 0x3f, - 0x37, 0xb4, 0x27, 0xcf, 0x0d, 0xed, 0xe3, 0x7c, 0xdf, 0xb6, 0x98, 0x1b, 0xec, 0xe7, 0xc5, 0x8f, - 0xf0, 0x8d, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x08, 0x1c, 0x63, 0x4f, 0x0f, 0x00, 0x00, + // 1478 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xde, 0xf1, 0xaf, 0xc4, 0xcf, 0x8e, 0xb3, 0x99, 0xa4, 0xad, 0xeb, 0x8a, 0x75, 0x3b, 0x52, + 0x4b, 0x04, 0x34, 0x2d, 0x41, 0x85, 0x1c, 0x40, 0x95, 0xd3, 0x3a, 0xa9, 0x21, 0x76, 0xd2, 0xb1, + 0x43, 0x01, 0x09, 0x59, 0x1b, 0x7b, 0x92, 0xac, 0xba, 0xbb, 0x76, 0xf7, 0x07, 0x22, 0x07, 0x24, + 0x24, 0x8e, 0x1c, 0xe8, 0xb1, 0x7f, 0x02, 0x67, 0x2e, 0xdc, 0x39, 0xf5, 0xd8, 0x63, 0xc5, 0xa1, + 0xa2, 0xee, 0x85, 0x1b, 0x15, 0x7f, 0x01, 0xda, 0x99, 0xd9, 0xf5, 0xae, 0x6b, 0x43, 0x0b, 0xf4, + 0xe6, 0x79, 0xef, 0x9b, 0x6f, 0xde, 0x7e, 0xf3, 0xe6, 0xbd, 0x67, 0x28, 0xf6, 0x06, 0x8e, 0xc7, + 0xbe, 0x5e, 0x1b, 0x3a, 0x03, 0x6f, 0x80, 0x73, 0x62, 0x55, 0xb9, 0x7c, 0x64, 0x78, 0xc7, 0xfe, + 0xc1, 0x5a, 0x6f, 0x60, 0x5d, 0x39, 0x1a, 0x1c, 0x0d, 0xae, 0x70, 0xf7, 0x81, 0x7f, 0xc8, 0x57, + 0x7c, 0xc1, 0x7f, 0x89, 0x6d, 0xe4, 0x4f, 0x04, 0xc5, 0x3b, 0x8e, 0xe1, 0x31, 0xca, 0xee, 0xf9, + 0xcc, 0xf5, 0x70, 0x0b, 0xc0, 0x33, 0x2c, 0xe6, 0x32, 0xc7, 0x60, 0x6e, 0x19, 0x9d, 0x4f, 0xaf, + 0x16, 0xd6, 0xf1, 0x9a, 0x3c, 0xaa, 0x63, 0x58, 0xac, 0xcd, 0x3d, 0x9b, 0x95, 0x87, 0x4f, 0xaa, + 0xca, 0xaf, 0x4f, 
0xaa, 0x78, 0xcf, 0x61, 0xba, 0x69, 0x0e, 0x7a, 0x9d, 0x68, 0x17, 0x8d, 0x31, + 0xe0, 0x0f, 0x20, 0xd7, 0x1e, 0xf8, 0x4e, 0x8f, 0x95, 0x53, 0xe7, 0xd1, 0x6a, 0x69, 0xbd, 0x1a, + 0x72, 0xc5, 0x4f, 0x5d, 0x13, 0x90, 0xba, 0xed, 0x5b, 0x54, 0xc2, 0xf1, 0x06, 0xcc, 0x5b, 0xcc, + 0xd3, 0xfb, 0xba, 0xa7, 0x97, 0xd3, 0x3c, 0x8c, 0xd3, 0xe1, 0xd6, 0x26, 0xf3, 0x1c, 0xa3, 0xd7, + 0x94, 0xde, 0xcd, 0xcc, 0xc3, 0x27, 0x55, 0x44, 0x23, 0x34, 0xa9, 0x02, 0x8c, 0xf9, 0xf0, 0x1c, + 0xa4, 0x6b, 0x7b, 0x0d, 0x55, 0xc1, 0xf3, 0x90, 0xa1, 0xfb, 0x3b, 0x75, 0x15, 0x91, 0x45, 0x58, + 0x90, 0xa7, 0xbb, 0xc3, 0x81, 0xed, 0x32, 0xf2, 0x11, 0x14, 0x28, 0xd3, 0xfb, 0xa1, 0x06, 0x6b, + 0x30, 0x77, 0xcf, 0x8f, 0x0b, 0xb0, 0x12, 0x9e, 0x7c, 0xdb, 0x67, 0xce, 0x89, 0x84, 0xd1, 0x10, + 0x44, 0xae, 0x43, 0x51, 0x6c, 0x17, 0x74, 0xf8, 0x0a, 0xcc, 0x39, 0xcc, 0xf5, 0x4d, 0x2f, 0xdc, + 0x7f, 0x6a, 0x62, 0xbf, 0xc0, 0xd1, 0x10, 0x45, 0x1e, 0x20, 0x28, 0xc6, 0xa9, 0xf1, 0x3b, 0x80, + 0x5d, 0x4f, 0x77, 0xbc, 0x2e, 0x57, 0xd2, 0xd3, 0xad, 0x61, 0xd7, 0x0a, 0xc8, 0xd0, 0x6a, 0x9a, + 0xaa, 0xdc, 0xd3, 0x09, 0x1d, 0x4d, 0x17, 0xaf, 0x82, 0xca, 0xec, 0x7e, 0x12, 0x9b, 0xe2, 0xd8, + 0x12, 0xb3, 0xfb, 0x71, 0xe4, 0x55, 0x98, 0xb7, 0x74, 0xaf, 0x77, 0xcc, 0x1c, 0x57, 0x8a, 0x1a, + 0x7d, 0xda, 0x8e, 0x7e, 0xc0, 0xcc, 0xa6, 0x70, 0xd2, 0x08, 0x45, 0x1a, 0xb0, 0x90, 0x08, 0x1a, + 0x6f, 0xbc, 0x64, 0x82, 0x04, 0xb7, 0xa2, 0xc4, 0x53, 0x81, 0xdc, 0x47, 0xb0, 0xcc, 0xb9, 0xda, + 0x9e, 0xc3, 0x74, 0x2b, 0x62, 0xbc, 0x0e, 0x85, 0xde, 0xb1, 0x6f, 0xdf, 0x4d, 0x50, 0x9e, 0x79, + 0x91, 0xf2, 0x46, 0x00, 0x92, 0xbc, 0xf1, 0x1d, 0x13, 0x21, 0xa5, 0x5e, 0x21, 0xa4, 0xef, 0x11, + 0x60, 0xfe, 0xe1, 0x9f, 0xea, 0xa6, 0xcf, 0xdc, 0x50, 0xfe, 0x37, 0x00, 0xcc, 0xc0, 0xda, 0xb5, + 0x75, 0x8b, 0x71, 0xd9, 0xf3, 0x34, 0xcf, 0x2d, 0x2d, 0xdd, 0x62, 0x33, 0x6e, 0x27, 0xf5, 0x0a, + 0xb7, 0x93, 0x9e, 0x76, 0x3b, 0x64, 0x03, 0x96, 0x13, 0xc1, 0x48, 0x7d, 0x2e, 0x40, 0x51, 0x44, + 0xf3, 0x15, 0xb7, 0x73, 0x81, 0xf2, 0xb4, 0x60, 0x8e, 0xa1, 0xe4, 0x2e, 0x2c, 0xed, 0x84, 0xe1, + 0xb9, 0xaf, 0x39, 0x89, 0xc8, 0x35, 0xa9, 0x99, 0x3c, 0x4c, 0x46, 0x59, 0x85, 0xc2, 0x58, 0xb3, + 0x30, 0x48, 0x88, 0x44, 0x73, 0x09, 0x06, 0x75, 0xdf, 0x65, 0x4e, 0xdb, 0xd3, 0xbd, 0x30, 0x44, + 0xf2, 0x33, 0x82, 0xa5, 0x98, 0x51, 0x52, 0x5d, 0x84, 0x92, 0x61, 0x1f, 0x31, 0xd7, 0x33, 0x06, + 0x76, 0xd7, 0xd1, 0x3d, 0x71, 0x05, 0x88, 0x2e, 0x44, 0x56, 0xaa, 0x7b, 0x2c, 0xb8, 0x25, 0xdb, + 0xb7, 0xba, 0xd1, 0xb5, 0xa3, 0xd5, 0x0c, 0xcd, 0xdb, 0xbe, 0x25, 0x6e, 0x3b, 0xf8, 0x7c, 0x7d, + 0x68, 0x74, 0x27, 0x98, 0xd2, 0x9c, 0x49, 0xd5, 0x87, 0x46, 0x23, 0x41, 0xb6, 0x06, 0xcb, 0x8e, + 0x6f, 0xb2, 0x49, 0x78, 0x86, 0xc3, 0x97, 0x02, 0x57, 0x02, 0x4f, 0xbe, 0x84, 0xe5, 0x20, 0xf0, + 0xc6, 0xcd, 0x64, 0xe8, 0x67, 0x60, 0xce, 0x77, 0x99, 0xd3, 0x35, 0xfa, 0x32, 0x6d, 0x72, 0xc1, + 0xb2, 0xd1, 0xc7, 0x97, 0x21, 0xc3, 0x4b, 0x59, 0x10, 0x66, 0x61, 0xfd, 0x6c, 0x98, 0x9d, 0x2f, + 0x7c, 0x3c, 0xe5, 0x30, 0xb2, 0x0d, 0x38, 0x70, 0xb9, 0x49, 0xf6, 0x77, 0x21, 0xeb, 0x06, 0x06, + 0xf9, 0x46, 0xce, 0xc5, 0x59, 0x26, 0x22, 0xa1, 0x02, 0x49, 0x7e, 0x42, 0xa0, 0x89, 0x7a, 0xe9, + 0x6e, 0x0d, 0x9c, 0xf8, 0x23, 0x7f, 0xdd, 0x79, 0x82, 0x37, 0xa0, 0x18, 0x96, 0x91, 0xae, 0xcb, + 0x3c, 0x59, 0x70, 0x4e, 0x4d, 0x2b, 0x38, 0x2e, 0x2d, 0x84, 0xd0, 0x36, 0xf3, 0x48, 0x03, 0xaa, + 0x33, 0x63, 0x96, 0x52, 0x5c, 0x82, 0x9c, 0xc5, 0x21, 0x52, 0x8b, 0x52, 0xb2, 0x39, 0x50, 0xe9, + 0x25, 0x65, 0x38, 0x2d, 0xa9, 0xc2, 0x7e, 0x11, 0xe6, 0x5e, 0x13, 0xce, 0xbc, 0xe0, 0x91, 0xe4, + 0xeb, 0xb1, 0xde, 0x83, 0xfe, 0xae, 0xf7, 
0xc4, 0xba, 0xce, 0x2f, 0x08, 0x16, 0x27, 0x6a, 0x55, + 0xa0, 0xd5, 0xa1, 0x33, 0xb0, 0x64, 0x52, 0xc5, 0xd3, 0xa2, 0x14, 0xd8, 0x1b, 0xd2, 0xdc, 0xe8, + 0xc7, 0xf3, 0x26, 0x95, 0xc8, 0x9b, 0xeb, 0x90, 0xe3, 0x6f, 0x28, 0xac, 0xd7, 0x4b, 0x09, 0xf9, + 0xf6, 0x74, 0xc3, 0xd9, 0x5c, 0x91, 0xad, 0xb8, 0xc8, 0x4d, 0xb5, 0xbe, 0x3e, 0xf4, 0x98, 0x43, + 0xe5, 0x36, 0xfc, 0x36, 0xe4, 0x44, 0xad, 0x2c, 0x67, 0x38, 0xc1, 0x42, 0x48, 0x10, 0x2f, 0xa7, + 0x12, 0x42, 0x7e, 0x40, 0x90, 0x15, 0xa1, 0xbf, 0xae, 0xa4, 0xa8, 0xc0, 0x3c, 0xb3, 0x7b, 0x83, + 0xbe, 0x61, 0x1f, 0xf1, 0xb7, 0x98, 0xa5, 0xd1, 0x1a, 0x63, 0xf9, 0x46, 0x82, 0x47, 0x57, 0x94, + 0x0f, 0xa1, 0x0c, 0xa7, 0x3b, 0x8e, 0x6e, 0xbb, 0x87, 0xcc, 0xe1, 0x81, 0x45, 0x19, 0x40, 0xbe, + 0x01, 0x18, 0xeb, 0x1d, 0xd3, 0x09, 0xfd, 0x3b, 0x9d, 0xd6, 0x60, 0xce, 0xd5, 0xad, 0xa1, 0x19, + 0x75, 0x90, 0x28, 0xa3, 0xda, 0xdc, 0x2c, 0x95, 0x0a, 0x41, 0xe4, 0x1a, 0xe4, 0x23, 0xea, 0x20, + 0xf2, 0xa8, 0x55, 0x14, 0x29, 0xff, 0x8d, 0x57, 0x20, 0xcb, 0x0b, 0x36, 0x17, 0xa2, 0x48, 0xc5, + 0x82, 0xd4, 0x20, 0x27, 0xf8, 0xc6, 0x7e, 0x51, 0xdc, 0xc4, 0x22, 0x28, 0xf6, 0x53, 0x54, 0x2c, + 0x78, 0xb1, 0xfa, 0x5b, 0x83, 0x85, 0xc4, 0x9b, 0x48, 0x74, 0x75, 0xf4, 0x52, 0x5d, 0xfd, 0x41, + 0x0a, 0x4a, 0xc9, 0x4c, 0xc6, 0xd7, 0x20, 0xe3, 0x9d, 0x0c, 0x45, 0x34, 0xa5, 0xf5, 0x0b, 0xd3, + 0xf3, 0x5d, 0x2e, 0x3b, 0x27, 0x43, 0x46, 0x39, 0x3c, 0xc8, 0x13, 0xf1, 0xd2, 0xba, 0x87, 0xba, + 0x65, 0x98, 0x27, 0xa2, 0x65, 0x8a, 0x1c, 0x56, 0x85, 0x67, 0x8b, 0x3b, 0x78, 0xe7, 0xc4, 0x90, + 0x39, 0x66, 0xe6, 0x90, 0xdf, 0x70, 0x9e, 0xf2, 0xdf, 0x81, 0xcd, 0xb7, 0x0d, 0xaf, 0x9c, 0x15, + 0xb6, 0xe0, 0x37, 0x39, 0x01, 0x18, 0x9f, 0x84, 0x0b, 0x30, 0xb7, 0xdf, 0xfa, 0xa4, 0xb5, 0x7b, + 0xa7, 0xa5, 0x2a, 0xc1, 0xe2, 0xc6, 0xee, 0x7e, 0xab, 0x53, 0xa7, 0x2a, 0xc2, 0x79, 0xc8, 0x6e, + 0xd7, 0xf6, 0xb7, 0xeb, 0x6a, 0x0a, 0x2f, 0x40, 0xfe, 0x56, 0xa3, 0xdd, 0xd9, 0xdd, 0xa6, 0xb5, + 0xa6, 0x9a, 0xc6, 0x18, 0x4a, 0xdc, 0x33, 0xb6, 0x65, 0x82, 0xad, 0xed, 0xfd, 0x66, 0xb3, 0x46, + 0x3f, 0x57, 0xb3, 0xc1, 0x38, 0xd8, 0x68, 0x6d, 0xed, 0xaa, 0x39, 0x5c, 0x84, 0xf9, 0x76, 0xa7, + 0xd6, 0xa9, 0xb7, 0xeb, 0x1d, 0x75, 0x8e, 0x34, 0x20, 0x27, 0x8e, 0xfe, 0xcf, 0x29, 0x45, 0xba, + 0x50, 0x8c, 0xeb, 0x8f, 0x2f, 0x26, 0x24, 0x8e, 0xe8, 0xb8, 0x3b, 0x26, 0x69, 0x98, 0x4c, 0x42, + 0xc4, 0x89, 0x64, 0x4a, 0x73, 0xa3, 0x4c, 0xa6, 0xef, 0x10, 0x94, 0xc6, 0x6f, 0x60, 0xcb, 0x30, + 0xd9, 0xff, 0x51, 0x72, 0x2a, 0x30, 0x7f, 0x68, 0x98, 0x8c, 0xc7, 0x20, 0x8e, 0x8b, 0xd6, 0xd3, + 0x9e, 0xe8, 0x5b, 0x1f, 0x43, 0x3e, 0xfa, 0x84, 0xe0, 0x46, 0xea, 0xb7, 0xf7, 0x6b, 0x3b, 0xaa, + 0x12, 0xdc, 0x48, 0x6b, 0xb7, 0xd3, 0x15, 0x4b, 0x84, 0x17, 0xa1, 0x40, 0xeb, 0xdb, 0xf5, 0xcf, + 0xba, 0xcd, 0x5a, 0xe7, 0xc6, 0x2d, 0x35, 0x15, 0x5c, 0x91, 0x30, 0xb4, 0x76, 0xa5, 0x2d, 0xbd, + 0xfe, 0x47, 0x16, 0xe6, 0xc3, 0x18, 0x83, 0x94, 0xdc, 0xf3, 0xdd, 0x63, 0xbc, 0x32, 0xed, 0x3f, + 0x43, 0xe5, 0xd4, 0x84, 0x55, 0x96, 0x05, 0x05, 0xbf, 0x0f, 0x59, 0x3e, 0x66, 0xe2, 0xa9, 0x63, + 0x7b, 0x65, 0xfa, 0x30, 0x4e, 0x14, 0x7c, 0x13, 0x0a, 0xb1, 0xf1, 0x74, 0xc6, 0xee, 0x73, 0x09, + 0x6b, 0x72, 0x92, 0x25, 0xca, 0x55, 0x84, 0x6f, 0x41, 0x21, 0x36, 0xc4, 0xe1, 0x4a, 0x22, 0x69, + 0x12, 0x63, 0xe6, 0x98, 0x6b, 0xca, 0xd4, 0x47, 0x14, 0x5c, 0x07, 0x18, 0xcf, 0x59, 0xf8, 0x6c, + 0x02, 0x1c, 0x1f, 0xf4, 0x2a, 0x95, 0x69, 0xae, 0x88, 0x66, 0x13, 0xf2, 0xd1, 0x94, 0x81, 0xcb, + 0x53, 0x06, 0x0f, 0x41, 0x32, 0x7b, 0x24, 0x21, 0x0a, 0xde, 0x82, 0x62, 0xcd, 0x34, 0x5f, 0x86, + 0xa6, 0x12, 0xf7, 0xb8, 0x93, 0x3c, 0x66, 0xd4, 0x73, 0x27, 0x1b, 
0x3b, 0xbe, 0x94, 0xac, 0x38, + 0xb3, 0xa6, 0x95, 0xca, 0x9b, 0xff, 0x88, 0x8b, 0x4e, 0xeb, 0xc0, 0xe2, 0x44, 0x87, 0xc7, 0xda, + 0xc4, 0xee, 0x89, 0xa1, 0xa0, 0x52, 0x9d, 0xe9, 0x8f, 0x58, 0x9b, 0x50, 0x4a, 0x76, 0x24, 0x3c, + 0xeb, 0xbf, 0x4a, 0x25, 0x3a, 0x6d, 0x46, 0x0b, 0x53, 0x56, 0xd1, 0xe6, 0x87, 0x8f, 0x9e, 0x6a, + 0xca, 0xe3, 0xa7, 0x9a, 0xf2, 0xfc, 0xa9, 0x86, 0xbe, 0x1d, 0x69, 0xe8, 0xc7, 0x91, 0x86, 0x1e, + 0x8e, 0x34, 0xf4, 0x68, 0xa4, 0xa1, 0xdf, 0x46, 0x1a, 0xfa, 0x7d, 0xa4, 0x29, 0xcf, 0x47, 0x1a, + 0xba, 0xff, 0x4c, 0x53, 0x1e, 0x3d, 0xd3, 0x94, 0xc7, 0xcf, 0x34, 0xe5, 0x8b, 0x5c, 0xcf, 0x34, + 0x98, 0xed, 0x1d, 0xe4, 0xf8, 0xdf, 0xf8, 0xf7, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x73, + 0x78, 0x05, 0x0d, 0x10, 0x00, 0x00, } func (x MatchType) String() string { @@ -1963,6 +1997,12 @@ func (this *LabelValuesRequest) Equal(that interface{}) bool { if this.LabelName != that1.LabelName { return false } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } return true } func (this *LabelValuesResponse) Equal(that interface{}) bool { @@ -2013,6 +2053,12 @@ func (this *LabelNamesRequest) Equal(that interface{}) bool { } else if this == nil { return false } + if this.StartTimestampMs != that1.StartTimestampMs { + return false + } + if this.EndTimestampMs != that1.EndTimestampMs { + return false + } return true } func (this *LabelNamesResponse) Equal(that interface{}) bool { @@ -2507,7 +2553,7 @@ func (this *MetricMetadata) Equal(that interface{}) bool { if this.Type != that1.Type { return false } - if this.MetricName != that1.MetricName { + if this.MetricFamilyName != that1.MetricFamilyName { return false } if this.Help != that1.Help { @@ -2714,9 +2760,11 @@ func (this *LabelValuesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 7) s = append(s, "&client.LabelValuesRequest{") s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2734,8 +2782,10 @@ func (this *LabelNamesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 4) + s := make([]string, 0, 6) s = append(s, "&client.LabelNamesRequest{") + s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") + s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2942,7 +2992,7 @@ func (this *MetricMetadata) GoString() string { s := make([]string, 0, 8) s = append(s, "&client.MetricMetadata{") s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "MetricName: "+fmt.Sprintf("%#v", this.MetricName)+",\n") + s = append(s, "MetricFamilyName: "+fmt.Sprintf("%#v", this.MetricFamilyName)+",\n") s = append(s, "Help: "+fmt.Sprintf("%#v", this.Help)+",\n") s = append(s, "Unit: "+fmt.Sprintf("%#v", this.Unit)+",\n") s = append(s, "}") @@ -3768,6 +3818,16 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.EndTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x18 + } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x10 + } if 
len(m.LabelName) > 0 { i -= len(m.LabelName) copy(dAtA[i:], m.LabelName) @@ -3830,6 +3890,16 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.EndTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -4483,10 +4553,10 @@ func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.MetricName) > 0 { - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintCortex(dAtA, i, uint64(len(m.MetricName))) + if len(m.MetricFamilyName) > 0 { + i -= len(m.MetricFamilyName) + copy(dAtA[i:], m.MetricFamilyName) + i = encodeVarintCortex(dAtA, i, uint64(len(m.MetricFamilyName))) i-- dAtA[i] = 0x12 } @@ -4769,6 +4839,12 @@ func (m *LabelValuesRequest) Size() (n int) { if l > 0 { n += 1 + l + sovCortex(uint64(l)) } + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } return n } @@ -4793,6 +4869,12 @@ func (m *LabelNamesRequest) Size() (n int) { } var l int _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovCortex(uint64(m.EndTimestampMs)) + } return n } @@ -5070,7 +5152,7 @@ func (m *MetricMetadata) Size() (n int) { if m.Type != 0 { n += 1 + sovCortex(uint64(m.Type)) } - l = len(m.MetricName) + l = len(m.MetricFamilyName) if l > 0 { n += 1 + l + sovCortex(uint64(l)) } @@ -5266,6 +5348,8 @@ func (this *LabelValuesRequest) String() string { } s := strings.Join([]string{`&LabelValuesRequest{`, `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, `}`, }, "") return s @@ -5285,6 +5369,8 @@ func (this *LabelNamesRequest) String() string { return "nil" } s := strings.Join([]string{`&LabelNamesRequest{`, + `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, + `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, `}`, }, "") return s @@ -5502,7 +5588,7 @@ func (this *MetricMetadata) String() string { } s := strings.Join([]string{`&MetricMetadata{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricFamilyName:` + fmt.Sprintf("%v", this.MetricFamilyName) + `,`, `Help:` + fmt.Sprintf("%v", this.Help) + `,`, `Unit:` + fmt.Sprintf("%v", this.Unit) + `,`, `}`, @@ -6313,6 +6399,44 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } m.LabelName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) @@ -6451,6 +6575,44 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) @@ -8122,7 +8284,7 @@ func (m *MetricMetadata) Unmarshal(dAtA []byte) error { } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MetricFamilyName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8150,7 +8312,7 @@ func (m *MetricMetadata) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MetricName = string(dAtA[iNdEx:postIndex]) + m.MetricFamilyName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto index 2c3d703ce726c..ebc0f92aa1c08 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto @@ -63,6 +63,8 @@ message QueryStreamResponse { message LabelValuesRequest { string label_name = 1; + int64 start_timestamp_ms = 2; + int64 end_timestamp_ms = 3; } message LabelValuesResponse { @@ -70,6 +72,8 @@ message LabelValuesResponse { } message LabelNamesRequest { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; } message LabelNamesResponse { @@ -161,7 +165,7 @@ message MetricMetadata { } MetricType type = 1; - string metric_name = 2; + string metric_family_name = 2; string help = 4; string unit = 5; } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go index 32848e7fcb11e..09196efd596aa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go @@ -17,7 +17,6 @@ import ( "github.com/prometheus/prometheus/pkg/labels" tsdb_record "github.com/prometheus/prometheus/tsdb/record" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" "golang.org/x/time/rate" "google.golang.org/grpc/codes" @@ -25,6 +24,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/tenant" 
"github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -392,11 +392,19 @@ func (i *Ingester) stopping(_ error) error { // * Change the state of ring to stop accepting writes. // * Flush all the chunks. func (i *Ingester) ShutdownHandler(w http.ResponseWriter, r *http.Request) { - originalState := i.lifecycler.FlushOnShutdown() + originalFlush := i.lifecycler.FlushOnShutdown() // We want to flush the chunks if transfer fails irrespective of original flag. i.lifecycler.SetFlushOnShutdown(true) + + // In the case of an HTTP shutdown, we want to unregister no matter what. + originalUnregister := i.lifecycler.ShouldUnregisterOnShutdown() + i.lifecycler.SetUnregisterOnShutdown(true) + _ = services.StopAndAwaitTerminated(context.Background(), i) - i.lifecycler.SetFlushOnShutdown(originalState) + // Set state back to original. + i.lifecycler.SetFlushOnShutdown(originalFlush) + i.lifecycler.SetUnregisterOnShutdown(originalUnregister) + w.WriteHeader(http.StatusNoContent) } @@ -432,7 +440,7 @@ func (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client. // retain anything from `req` past the call to ReuseSlice defer client.ReuseSlice(req.Timeseries) - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id") } @@ -612,7 +620,7 @@ func (i *Ingester) appendMetadata(userID string, m *client.MetricMetadata) error userMetadata := i.getOrCreateUserMetadata(userID) - return userMetadata.add(m.GetMetricName(), m) + return userMetadata.add(m.GetMetricFamilyName(), m) } func (i *Ingester) getOrCreateUserMetadata(userID string) *userMetricsMetadata { @@ -675,7 +683,7 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client return i.v2Query(ctx, req) } - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -922,7 +930,7 @@ func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetad } i.userStatesMtx.RUnlock() - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index be319bc9eabc0..d87396ca3d2c2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "math" "net/http" "os" "path/filepath" @@ -16,22 +17,23 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/pkg/gate" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/shipper" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" "go.uber.org/atomic" + "golang.org/x/sync/errgroup" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb 
"github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -47,25 +49,138 @@ type Shipper interface { Sync(ctx context.Context) (uploaded int, err error) } +type tsdbState int + +const ( + active tsdbState = iota // Pushes are allowed only in this state. + forceCompacting // TSDB is being force-compacted. + closing // Used while closing idle TSDB. + closed // Used to avoid setting closing back to active in closeAndDeleteIdleUsers method. +) + +// Describes result of TSDB-close check. String is used as metric label. +type tsdbCloseCheckResult string + +const ( + tsdbIdle tsdbCloseCheckResult = "idle" // Not reported via metrics. Metrics use tsdbIdleClosed on success. + tsdbShippingDisabled tsdbCloseCheckResult = "shipping_disabled" + tsdbNotIdle tsdbCloseCheckResult = "not_idle" + tsdbNotCompacted tsdbCloseCheckResult = "not_compacted" + tsdbNotShipped tsdbCloseCheckResult = "not_shipped" + tsdbCheckFailed tsdbCloseCheckResult = "check_failed" + tsdbCloseFailed tsdbCloseCheckResult = "close_failed" + tsdbNotActive tsdbCloseCheckResult = "not_active" + tsdbDataRemovalFailed tsdbCloseCheckResult = "data_removal_failed" + tsdbTenantMarkedForDeletion tsdbCloseCheckResult = "tenant_marked_for_deletion" + tsdbIdleClosed tsdbCloseCheckResult = "idle_closed" // Success. +) + +func (r tsdbCloseCheckResult) shouldClose() bool { + return r == tsdbIdle || r == tsdbTenantMarkedForDeletion +} + type userTSDB struct { - *tsdb.DB + db *tsdb.DB userID string refCache *cortex_tsdb.RefCache activeSeries *ActiveSeries seriesInMetric *metricCounter limiter *Limiter + stateMtx sync.RWMutex + state tsdbState + pushesInFlight sync.WaitGroup // Increased with Read lock held, only if state == active. + // Used to detect idle TSDBs. - lastUpdate *atomic.Int64 + lastUpdate atomic.Int64 // Thanos shipper used to ship blocks to the storage. shipper Shipper + // When deletion marker is found for the tenant (checked before shipping), + // shipping stops and TSDB is closed before reaching idle timeout time (if enabled). + deletionMarkFound atomic.Bool + + // Unix timestamp of last deletion mark check. + lastDeletionMarkCheck atomic.Int64 + // for statistics ingestedAPISamples *ewmaRate ingestedRuleSamples *ewmaRate } +// Explicitly wrapping the tsdb.DB functions that we use. + +func (u *userTSDB) Appender(ctx context.Context) storage.Appender { + return u.db.Appender(ctx) +} + +func (u *userTSDB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return u.db.Querier(ctx, mint, maxt) +} + +func (u *userTSDB) Head() *tsdb.Head { + return u.db.Head() +} + +func (u *userTSDB) Blocks() []*tsdb.Block { + return u.db.Blocks() +} + +func (u *userTSDB) Close() error { + return u.db.Close() +} + +func (u *userTSDB) Compact() error { + return u.db.Compact() +} + +func (u *userTSDB) StartTime() (int64, error) { + return u.db.StartTime() +} + +func (u *userTSDB) casState(from, to tsdbState) bool { + u.stateMtx.Lock() + defer u.stateMtx.Unlock() + + if u.state != from { + return false + } + u.state = to + return true +} + +// compactHead compacts the Head block at specified block durations avoiding a single huge block. 
+func (u *userTSDB) compactHead(blockDuration int64) error { + if !u.casState(active, forceCompacting) { + return errors.New("TSDB head cannot be compacted because it is not in active state (possibly being closed)") + } + + defer u.casState(forceCompacting, active) + + // Ingestion of samples in parallel with forced compaction can lead to overlapping blocks. + // So we wait for existing in-flight requests to finish. Future push requests would fail until compaction is over. + u.pushesInFlight.Wait() + + h := u.Head() + + minTime, maxTime := h.MinTime(), h.MaxTime() + + for (minTime/blockDuration)*blockDuration != (maxTime/blockDuration)*blockDuration { + // Data in Head spans across multiple block ranges, so we break it into blocks here. + // Block max time is exclusive, so we do a -1 here. + blockMaxTime := ((minTime/blockDuration)+1)*blockDuration - 1 + if err := u.db.CompactHead(tsdb.NewRangeHead(h, minTime, blockMaxTime)); err != nil { + return err + } + + // Get current min/max times after compaction. + minTime, maxTime = h.MinTime(), h.MaxTime() + } + + return u.db.CompactHead(tsdb.NewRangeHead(h, minTime, maxTime)) +} + // PreCreation implements SeriesLifecycleCallback interface. func (u *userTSDB) PreCreation(metric labels.Labels) error { if u.limiter == nil { @@ -73,7 +188,7 @@ func (u *userTSDB) PreCreation(metric labels.Labels) error { } // Total series limit. - if err := u.limiter.AssertMaxSeriesPerUser(u.userID, int(u.DB.Head().NumSeries())); err != nil { + if err := u.limiter.AssertMaxSeriesPerUser(u.userID, int(u.Head().NumSeries())); err != nil { return makeLimitError(perUserSeriesLimit, err) } @@ -113,15 +228,15 @@ func (u *userTSDB) PostDeletion(metrics ...labels.Labels) { // blocksToDelete filters the input blocks and returns the blocks which are safe to be deleted from the ingester. func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { - if u.DB == nil { + if u.db == nil { return nil } - deletable := tsdb.DefaultBlocksToDelete(u.DB)(blocks) + deletable := tsdb.DefaultBlocksToDelete(u.db)(blocks) if u.shipper == nil { return deletable } - shipperMeta, err := shipper.ReadMetaFile(u.Dir()) + shippedBlocks, err := u.getShippedBlocks() if err != nil { // If there is any issue with the shipper, we should be conservative and not delete anything. level.Error(util.Logger).Log("msg", "failed to read shipper meta during deletion of blocks", "user", u.userID, "err", err) @@ -129,7 +244,7 @@ func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { } result := map[ulid.ULID]struct{}{} - for _, shippedID := range shipperMeta.Uploaded { + for _, shippedID := range shippedBlocks { if _, ok := deletable[shippedID]; ok { result[shippedID] = struct{}{} } @@ -137,6 +252,15 @@ func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { return result } +func (u *userTSDB) getShippedBlocks() ([]ulid.ULID, error) { + shipperMeta, err := shipper.ReadMetaFile(u.db.Dir()) + if err != nil { + return nil, err + } + + return shipperMeta.Uploaded, nil +} + func (u *userTSDB) isIdle(now time.Time, idle time.Duration) bool { lu := u.lastUpdate.Load() @@ -147,6 +271,41 @@ func (u *userTSDB) setLastUpdate(t time.Time) { u.lastUpdate.Store(t.Unix()) } +// Checks if TSDB can be closed. 
+func (u *userTSDB) shouldCloseTSDB(idleTimeout time.Duration) (tsdbCloseCheckResult, error) { + if u.deletionMarkFound.Load() { + return tsdbTenantMarkedForDeletion, nil + } + + if !u.isIdle(time.Now(), idleTimeout) { + return tsdbNotIdle, nil + } + + // If head is not compacted, we cannot close this yet. + if u.Head().NumSeries() > 0 { + return tsdbNotCompacted, nil + } + + // Verify that all blocks have been shipped. + shipped, err := u.getShippedBlocks() + if err != nil { + return tsdbCheckFailed, errors.Wrapf(err, "failed to read shipper meta") + } + + shippedMap := make(map[ulid.ULID]bool, len(shipped)) + for _, b := range shipped { + shippedMap[b] = true + } + + for _, b := range u.Blocks() { + if !shippedMap[b.Meta().ULID] { + return tsdbNotShipped, nil + } + } + + return tsdbIdle, nil +} + // TSDBState holds data structures used by the TSDB storage engine type TSDBState struct { dbs map[string]*userTSDB // tsdb sharded by userID @@ -169,9 +328,26 @@ type TSDBState struct { appenderAddDuration prometheus.Histogram appenderCommitDuration prometheus.Histogram refCachePurgeDuration prometheus.Histogram + idleTsdbChecks *prometheus.CounterVec } func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer) TSDBState { + idleTsdbChecks := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_idle_tsdb_checks_total", + Help: "The total number of various results for idle TSDB checks.", + }, []string{"result"}) + + idleTsdbChecks.WithLabelValues(string(tsdbShippingDisabled)) + idleTsdbChecks.WithLabelValues(string(tsdbNotIdle)) + idleTsdbChecks.WithLabelValues(string(tsdbNotCompacted)) + idleTsdbChecks.WithLabelValues(string(tsdbNotShipped)) + idleTsdbChecks.WithLabelValues(string(tsdbCheckFailed)) + idleTsdbChecks.WithLabelValues(string(tsdbCloseFailed)) + idleTsdbChecks.WithLabelValues(string(tsdbNotActive)) + idleTsdbChecks.WithLabelValues(string(tsdbDataRemovalFailed)) + idleTsdbChecks.WithLabelValues(string(tsdbTenantMarkedForDeletion)) + idleTsdbChecks.WithLabelValues(string(tsdbIdleClosed)) + return TSDBState{ dbs: make(map[string]*userTSDB), bucket: bucketClient, @@ -208,12 +384,14 @@ func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer Help: "The total time it takes to purge the TSDB series reference cache for a single tenant.", Buckets: prometheus.DefBuckets, }), + + idleTsdbChecks: idleTsdbChecks, } } // NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage. func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { - bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) + bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -266,7 +444,7 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, // Special version of ingester used by Flusher. This ingester is not ingesting anything, its only purpose is to react // on Flush method and flush all openened TSDBs when called. 
func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, error) { - bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) + bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -325,6 +503,15 @@ func (i *Ingester) startingV2(ctx context.Context) error { servs = append(servs, shippingService) } + if i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout > 0 { + interval := i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBInterval + if interval == 0 { + interval = cortex_tsdb.DefaultCloseIdleTSDBInterval + } + closeIdleService := services.NewTimerService(interval, nil, i.closeAndDeleteIdleUserTSDBs, nil) + servs = append(servs, closeIdleService) + } + var err error i.TSDBState.subservices, err = services.NewManager(servs...) if err == nil { @@ -437,7 +624,7 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien // retain anything from `req` past the call to ReuseSlice defer client.ReuseSlice(req.Timeseries) - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id") } @@ -455,6 +642,11 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien } i.userStatesMtx.RUnlock() + if err := db.acquireAppendLock(); err != nil { + return &client.WriteResponse{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error()) + } + defer db.releaseAppendLock() + // Given metadata is a best-effort approach, and we don't halt on errors // process it before samples. Otherwise, we risk returning an error before ingestion. i.pushMetadata(ctx, userID, req.GetMetadata()) @@ -605,8 +797,31 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien return &client.WriteResponse{}, nil } +func (u *userTSDB) acquireAppendLock() error { + u.stateMtx.RLock() + defer u.stateMtx.RUnlock() + + if u.state != active { + switch u.state { + case forceCompacting: + return errors.New("forced compaction in progress") + case closing: + return errors.New("TSDB is closing") + default: + return errors.New("TSDB is not active") + } + } + + u.pushesInFlight.Add(1) + return nil +} + +func (u *userTSDB) releaseAppendLock() { + u.pushesInFlight.Done() +} + func (i *Ingester) v2Query(ctx context.Context, req *client.QueryRequest) (*client.QueryResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -662,7 +877,7 @@ func (i *Ingester) v2Query(ctx context.Context, req *client.QueryRequest) (*clie } func (i *Ingester) v2LabelValues(ctx context.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -672,10 +887,12 @@ func (i *Ingester) v2LabelValues(ctx context.Context, req *client.LabelValuesReq return &client.LabelValuesResponse{}, nil } - // Since ingester may run with a variable TSDB retention which could be few days long, - // we only query the TSDB head time range in order to avoid heavy queries (which could - // lead to ingesters out-of-memory) in case the TSDB retention is several days. 
- q, err := db.Querier(ctx, db.Head().MinTime(), db.Head().MaxTime()) + mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db) + if err != nil { + return nil, err + } + + q, err := db.Querier(ctx, mint, maxt) if err != nil { return nil, err } @@ -692,7 +909,7 @@ func (i *Ingester) v2LabelValues(ctx context.Context, req *client.LabelValuesReq } func (i *Ingester) v2LabelNames(ctx context.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -702,10 +919,12 @@ func (i *Ingester) v2LabelNames(ctx context.Context, req *client.LabelNamesReque return &client.LabelNamesResponse{}, nil } - // Since ingester may run with a variable TSDB retention which could be few days long, - // we only query the TSDB head time range in order to avoid heavy queries (which could - // lead to ingesters out-of-memory) in case the TSDB retention is several days. - q, err := db.Querier(ctx, db.Head().MinTime(), db.Head().MaxTime()) + mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db) + if err != nil { + return nil, err + } + + q, err := db.Querier(ctx, mint, maxt) if err != nil { return nil, err } @@ -722,7 +941,7 @@ func (i *Ingester) v2LabelNames(ctx context.Context, req *client.LabelNamesReque } func (i *Ingester) v2MetricsForLabelMatchers(ctx context.Context, req *client.MetricsForLabelMatchersRequest) (*client.MetricsForLabelMatchersResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -738,10 +957,12 @@ func (i *Ingester) v2MetricsForLabelMatchers(ctx context.Context, req *client.Me return nil, err } - // Since ingester may run with a variable TSDB retention which could be few days long, - // we only query the TSDB head time range in order to avoid heavy queries (which could - // lead to ingesters out-of-memory) in case the TSDB retention is several days. 
- q, err := db.Querier(ctx, db.Head().MinTime(), db.Head().MaxTime()) + mint, maxt, err := metadataQueryRange(req.StartTimestampMs, req.EndTimestampMs, db) + if err != nil { + return nil, err + } + + q, err := db.Querier(ctx, mint, maxt) if err != nil { return nil, err } @@ -781,7 +1002,7 @@ func (i *Ingester) v2MetricsForLabelMatchers(ctx context.Context, req *client.Me } func (i *Ingester) v2UserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -830,7 +1051,7 @@ func (i *Ingester) v2QueryStream(req *client.QueryRequest, stream client.Ingeste log, ctx := spanlogger.New(stream.Context(), "v2QueryStream") defer log.Finish() - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return err } @@ -994,19 +1215,20 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { seriesInMetric: newMetricCounter(i.limiter), ingestedAPISamples: newEWMARate(0.2, i.cfg.RateUpdatePeriod), ingestedRuleSamples: newEWMARate(0.2, i.cfg.RateUpdatePeriod), - lastUpdate: atomic.NewInt64(0), } // Create a new user database db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ - RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), - MinBlockDuration: blockRanges[0], - MaxBlockDuration: blockRanges[len(blockRanges)-1], - NoLockfile: true, - StripeSize: i.cfg.BlocksStorageConfig.TSDB.StripeSize, - WALCompression: i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled, - SeriesLifecycleCallback: userDB, - BlocksToDelete: userDB.blocksToDelete, + RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), + MinBlockDuration: blockRanges[0], + MaxBlockDuration: blockRanges[len(blockRanges)-1], + NoLockfile: true, + StripeSize: i.cfg.BlocksStorageConfig.TSDB.StripeSize, + HeadChunksWriteBufferSize: i.cfg.BlocksStorageConfig.TSDB.HeadChunksWriteBufferSize, + WALCompression: i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled, + WALSegmentSize: i.cfg.BlocksStorageConfig.TSDB.WALSegmentSizeBytes, + SeriesLifecycleCallback: userDB, + BlocksToDelete: userDB.blocksToDelete, }) if err != nil { return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) @@ -1022,11 +1244,19 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { return nil, errors.Wrapf(err, "failed to compact TSDB: %s", udir) } - userDB.DB = db + userDB.db = db // We set the limiter here because we don't want to limit // series during WAL replay. userDB.limiter = i.limiter - userDB.setLastUpdate(time.Now()) // After WAL replay. + + if db.Head().NumSeries() > 0 { + // If there are series in the head, use max time from head. If this time is too old, + // TSDB will be eligible for flushing and closing sooner, unless more data is pushed to it quickly. + userDB.setLastUpdate(util.TimeFromMillis(db.Head().MaxTime())) + } else { + // If head is empty (eg. new TSDB), don't close it right after. + userDB.setLastUpdate(time.Now()) + } // Thanos shipper requires at least 1 external label to be set. 
For this reason, // we set the tenant ID as external label and we'll filter it out when reading @@ -1047,7 +1277,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { userLogger, tsdbPromReg, udir, - cortex_tsdb.NewUserBucketClient(userID, i.TSDBState.bucket), + bucket.NewUserBucketClient(userID, i.TSDBState.bucket), func() labels.Labels { return l }, metadata.ReceiveSource, false, // No need to upload compacted blocks. Cortex compactor takes care of that. @@ -1096,98 +1326,100 @@ func (i *Ingester) closeAllTSDB() { // concurrently opening TSDB. func (i *Ingester) openExistingTSDB(ctx context.Context) error { level.Info(util.Logger).Log("msg", "opening existing TSDBs") - wg := &sync.WaitGroup{} - openGate := gate.New(i.cfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup) - // Keep track of all errors that could occur. - errs := tsdb_errors.MultiError{} - errsMx := sync.Mutex{} + queue := make(chan string) + group, groupCtx := errgroup.WithContext(ctx) - walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - // If the root directory doesn't exist, we're OK (not needed to be created upfront). - if os.IsNotExist(err) && path == i.cfg.BlocksStorageConfig.TSDB.Dir { - return filepath.SkipDir - } + // Create a pool of workers which will open existing TSDBs. + for n := 0; n < i.cfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup; n++ { + group.Go(func() error { + for userID := range queue { + startTime := time.Now() - level.Error(util.Logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err) - return errors.Wrapf(err, "an error occurred while iterating the filesystem storing TSDBs at %s", path) - } + db, err := i.createTSDB(userID) + if err != nil { + level.Error(util.Logger).Log("msg", "unable to open TSDB", "err", err, "user", userID) + return errors.Wrapf(err, "unable to open TSDB for user %s", userID) + } + + // Add the database to the map of user databases + i.userStatesMtx.Lock() + i.TSDBState.dbs[userID] = db + i.userStatesMtx.Unlock() + i.metrics.memUsers.Inc() + + i.TSDBState.walReplayTime.Observe(time.Since(startTime).Seconds()) + } - // Skip root dir and all other files - if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() { return nil - } + }) + } - // Top level directories are assumed to be user TSDBs - userID := info.Name() - f, err := os.Open(path) - if err != nil { - level.Error(util.Logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path) - return errors.Wrapf(err, "unable to open TSDB dir %s for user %s", path, userID) - } - defer f.Close() + // Spawn a goroutine to find all users with a TSDB on the filesystem. + group.Go(func() error { + // Close the queue once filesystem walking is done. + defer close(queue) - // If the dir is empty skip it - if _, err := f.Readdirnames(1); err != nil { - if err == io.EOF { - return filepath.SkipDir + walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + // If the root directory doesn't exist, we're OK (not needed to be created upfront). 
+ if os.IsNotExist(err) && path == i.cfg.BlocksStorageConfig.TSDB.Dir { + return filepath.SkipDir + } + + level.Error(util.Logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err) + return errors.Wrapf(err, "an error occurred while iterating the filesystem storing TSDBs at %s", path) } - level.Error(util.Logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path) - return errors.Wrapf(err, "unable to read TSDB dir %s for user %s", path, userID) - } + // Skip root dir and all other files + if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() { + return nil + } - // Limit the number of TSDB's opening concurrently. Start blocks until there's a free spot available or the context is cancelled. - if err := openGate.Start(ctx); err != nil { - return err - } + // Top level directories are assumed to be user TSDBs + userID := info.Name() + f, err := os.Open(path) + if err != nil { + level.Error(util.Logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path) + return errors.Wrapf(err, "unable to open TSDB dir %s for user %s", path, userID) + } + defer f.Close() - wg.Add(1) - go func(userID string) { - defer wg.Done() - defer openGate.Done() - defer func(ts time.Time) { - i.TSDBState.walReplayTime.Observe(time.Since(ts).Seconds()) - }(time.Now()) + // If the dir is empty skip it + if _, err := f.Readdirnames(1); err != nil { + if err == io.EOF { + return filepath.SkipDir + } - db, err := i.createTSDB(userID) - if err != nil { - errsMx.Lock() - errs.Add(errors.Wrapf(err, "unable to open TSDB for user %s", userID)) - errsMx.Unlock() + level.Error(util.Logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path) + return errors.Wrapf(err, "unable to read TSDB dir %s for user %s", path, userID) + } - level.Error(util.Logger).Log("msg", "unable to open TSDB", "err", err, "user", userID) - return + // Enqueue the user to be processed. + select { + case queue <- userID: + // Nothing to do. + case <-groupCtx.Done(): + // Interrupt in case a failure occurred in another goroutine. + return nil } - // Add the database to the map of user databases - i.userStatesMtx.Lock() - i.TSDBState.dbs[userID] = db - i.userStatesMtx.Unlock() - i.metrics.memUsers.Inc() - }(userID) + // Don't descend into subdirectories. + return filepath.SkipDir + }) - return filepath.SkipDir // Don't descend into directories + return errors.Wrapf(walkErr, "unable to walk directory %s containing existing TSDBs", i.cfg.BlocksStorageConfig.TSDB.Dir) }) - if walkErr != nil { - errsMx.Lock() - errs.Add(errors.Wrapf(walkErr, "unable to walk directory %s containing existing TSDBs", i.cfg.BlocksStorageConfig.TSDB.Dir)) - errsMx.Unlock() - } - - // Wait for all opening routines to finish - wg.Wait() - - // Ensure no error occurred. - if errs.Err() == nil { - level.Info(util.Logger).Log("msg", "successfully opened existing TSDBs") - return nil + // Wait for all workers to complete. + err := group.Wait() + if err != nil { + level.Error(util.Logger).Log("msg", "error while opening existing TSDBs", "err", err) + return err } - level.Error(util.Logger).Log("msg", "error while opening existing TSDBs", "err", errs.Error()) - return errs.Err() + level.Info(util.Logger).Log("msg", "successfully opened existing TSDBs") + return nil } // numSeriesInTSDB returns the total number of in-memory series across all open TSDBs. 
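The rewritten openExistingTSDB above replaces the gate, WaitGroup, and shared error slice with an errgroup-backed worker pool fed from a channel: a fixed number of workers drain the queue, a producer walks the filesystem and enqueues users, and the first error cancels the rest. A minimal, runnable sketch of that shape with placeholder work:

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        queue := make(chan string)
        group, ctx := errgroup.WithContext(context.Background())

        // Fixed-size worker pool; the first returned error cancels ctx.
        for n := 0; n < 4; n++ {
            group.Go(func() error {
                for userID := range queue {
                    fmt.Println("opening TSDB for", userID)
                }
                return nil
            })
        }

        // Producer: enqueue work and stop early if a worker has already failed.
        group.Go(func() error {
            defer close(queue)

            for _, userID := range []string{"user-1", "user-2", "user-3"} {
                select {
                case queue <- userID:
                case <-ctx.Done():
                    return nil
                }
            }
            return nil
        })

        if err := group.Wait(); err != nil {
            fmt.Println("failed to open TSDBs:", err)
        }
    }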
@@ -1241,11 +1473,32 @@ func (i *Ingester) shipBlocks(ctx context.Context) { // Number of concurrent workers is limited in order to avoid to concurrently sync a lot // of tenants in a large cluster. - i.runConcurrentUserWorkers(ctx, i.cfg.BlocksStorageConfig.TSDB.ShipConcurrency, func(userID string) { + _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.ShipConcurrency, func(ctx context.Context, userID string) error { // Get the user's DB. If the user doesn't exist, we skip it. userDB := i.getTSDB(userID) if userDB == nil || userDB.shipper == nil { - return + return nil + } + + if userDB.deletionMarkFound.Load() { + return nil + } + + if time.Since(time.Unix(userDB.lastDeletionMarkCheck.Load(), 0)) > cortex_tsdb.DeletionMarkCheckInterval { + // Even if check fails with error, we don't want to repeat it too often. + userDB.lastDeletionMarkCheck.Store(time.Now().Unix()) + + deletionMarkExists, err := cortex_tsdb.TenantDeletionMarkExists(ctx, i.TSDBState.bucket, userID) + if err != nil { + // If we cannot check for deletion mark, we continue anyway, even though in production shipper will likely fail too. + // This however simplifies unit tests, where tenant deletion check is enabled by default, but tests don't setup bucket. + level.Warn(util.Logger).Log("msg", "failed to check for tenant deletion mark before shipping blocks", "user", userID, "err", err) + } else if deletionMarkExists { + userDB.deletionMarkFound.Store(true) + + level.Info(util.Logger).Log("msg", "tenant deletion mark exists, not shipping blocks", "user", userID) + return nil + } } // Run the shipper's Sync() to upload unshipped blocks. @@ -1254,6 +1507,8 @@ func (i *Ingester) shipBlocks(ctx context.Context) { } else { level.Debug(util.Logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded) } + + return nil }) } @@ -1293,16 +1548,16 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { } } - i.runConcurrentUserWorkers(ctx, i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(userID string) { + _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(ctx context.Context, userID string) error { userDB := i.getTSDB(userID) if userDB == nil { - return + return nil } // Don't do anything, if there is nothing to compact. 
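The tenant deletion-mark lookup added to shipBlocks above is throttled by keeping the last attempt time in an atomic Unix timestamp, so a failing bucket call is not retried on every shipper sync. A small, runnable sketch of that throttling pattern, with hypothetical helper names:

    package main

    import (
        "fmt"
        "time"

        "go.uber.org/atomic"
    )

    // checkAtMostOncePer runs check() only if more than interval has passed since
    // the previous attempt; the attempt time is recorded even when the check fails,
    // which is how the deletion-mark lookup avoids hammering the object store.
    func checkAtMostOncePer(lastCheck *atomic.Int64, interval time.Duration, check func() (bool, error)) bool {
        if time.Since(time.Unix(lastCheck.Load(), 0)) <= interval {
            return false
        }
        lastCheck.Store(time.Now().Unix())

        found, err := check()
        if err != nil {
            fmt.Println("check failed, will retry after interval:", err)
            return false
        }
        return found
    }

    func main() {
        lastCheck := atomic.NewInt64(0)

        found := checkAtMostOncePer(lastCheck, 15*time.Minute, func() (bool, error) {
            return false, nil // pretend we looked for a deletion mark in the bucket
        })
        fmt.Println("deletion mark found:", found)
    }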
h := userDB.Head() if h.NumSeries() == 0 { - return + return nil } var err error @@ -1313,12 +1568,12 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { switch { case force: reason = "forced" - err = userDB.CompactHead(tsdb.NewRangeHead(h, h.MinTime(), h.MaxTime())) + err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds()) case i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout): reason = "idle" level.Info(util.Logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID) - err = userDB.CompactHead(tsdb.NewRangeHead(h, h.MinTime(), h.MaxTime())) + err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds()) default: reason = "regular" @@ -1331,39 +1586,92 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { } else { level.Debug(util.Logger).Log("msg", "TSDB blocks compaction completed successfully", "user", userID, "compactReason", reason) } + + return nil }) } -func (i *Ingester) runConcurrentUserWorkers(ctx context.Context, concurrency int, userFunc func(userID string)) { - wg := sync.WaitGroup{} - ch := make(chan string) +func (i *Ingester) closeAndDeleteIdleUserTSDBs(ctx context.Context) error { + for _, userID := range i.getTSDBUsers() { + if ctx.Err() != nil { + return nil + } - for ix := 0; ix < concurrency; ix++ { - wg.Add(1) - go func() { - defer wg.Done() + result := i.closeAndDeleteUserTSDBIfIdle(userID) - for userID := range ch { - userFunc(userID) - } - }() + i.TSDBState.idleTsdbChecks.WithLabelValues(string(result)).Inc() } -sendLoop: - for _, userID := range i.getTSDBUsers() { - select { - case ch <- userID: - // ok - case <-ctx.Done(): - // don't start new tasks. - break sendLoop + return nil +} + +func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckResult { + userDB := i.getTSDB(userID) + if userDB == nil || userDB.shipper == nil { + // We will not delete local data when not using shipping to storage. + return tsdbShippingDisabled + } + + if result, err := userDB.shouldCloseTSDB(i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout); !result.shouldClose() { + if err != nil { + level.Error(util.Logger).Log("msg", "cannot close idle TSDB", "user", userID, "err", err) } + return result } - close(ch) + // This disables pushes and force-compactions. + if !userDB.casState(active, closing) { + return tsdbNotActive + } - // wait for ongoing workers to finish. - wg.Wait() + // If TSDB is fully closed, we will set state to 'closed', which will prevent this defered closing -> active transition. + defer userDB.casState(closing, active) + + // Make sure we don't ignore any possible inflight pushes. + userDB.pushesInFlight.Wait() + + // Verify again, things may have changed during the checks and pushes. 
+ tenantDeleted := false + if result, err := userDB.shouldCloseTSDB(i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout); !result.shouldClose() { + if err != nil { + level.Error(util.Logger).Log("msg", "cannot close idle TSDB", "user", userID, "err", err) + } + return result + } else if result == tsdbTenantMarkedForDeletion { + tenantDeleted = true + } + + dir := userDB.db.Dir() + + if err := userDB.Close(); err != nil { + level.Error(util.Logger).Log("msg", "failed to close idle TSDB", "user", userID, "err", err) + return tsdbCloseFailed + } + + level.Info(util.Logger).Log("msg", "closed idle TSDB", "user", userID) + + // This will prevent going back to "active" state in deferred statement. + userDB.casState(closing, closed) + + i.userStatesMtx.Lock() + delete(i.TSDBState.dbs, userID) + i.userStatesMtx.Unlock() + + i.TSDBState.tsdbMetrics.removeRegistryForUser(userID) + + // And delete local data. + if err := os.RemoveAll(dir); err != nil { + level.Error(util.Logger).Log("msg", "failed to delete local TSDB", "user", userID, "err", err) + return tsdbDataRemovalFailed + } + + if tenantDeleted { + level.Info(util.Logger).Log("msg", "deleted local TSDB, user marked for deletion", "user", userID, "dir", dir) + return tsdbTenantMarkedForDeletion + } + + level.Info(util.Logger).Log("msg", "deleted local TSDB, due to being idle", "user", userID, "dir", dir) + return tsdbIdleClosed } // This method will flush all data. It is called as part of Lifecycler's shutdown (if flush on shutdown is configured), or from the flusher. @@ -1440,3 +1748,30 @@ func (i *Ingester) v2FlushHandler(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNoContent) } + +// metadataQueryRange returns the best range to query for metadata queries based on the timerange in the ingester. +func metadataQueryRange(queryStart, queryEnd int64, db *userTSDB) (mint, maxt int64, err error) { + // Ingesters are run with limited retention and we don't support querying the store-gateway for labels yet. + // This means if someone loads a dashboard that is outside the range of the ingester, and we only return the + // data for the timerange requested (which will be empty), the dashboards will break. To fix this we should + // return the "head block" range until we can query the store-gateway. + + // Now the question would be what to do when the query is partially in the ingester range. I would err on the side + // of caution and query the entire db, as I can't think of a good way to query the head + the overlapping range. + mint, maxt = queryStart, queryEnd + + lowestTs, err := db.StartTime() + if err != nil { + return mint, maxt, err + } + + // Completely outside. + if queryEnd < lowestTs { + mint, maxt = db.Head().MinTime(), db.Head().MaxTime() + } else if queryStart < lowestTs { + // Partially inside. 
+ mint, maxt = 0, math.MaxInt64 + } + + return +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go index 54fa269245169..8b60c8dc0ae95 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go @@ -1,8 +1,6 @@ package ingester import ( - "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -232,10 +230,14 @@ type tsdbMetrics struct { tsdbFsyncDuration *prometheus.Desc tsdbPageFlushes *prometheus.Desc tsdbPageCompletions *prometheus.Desc - tsdbTruncateFail *prometheus.Desc - tsdbTruncateTotal *prometheus.Desc - tsdbTruncateDuration *prometheus.Desc - tsdbWritesFailed *prometheus.Desc + tsdbWALTruncateFail *prometheus.Desc + tsdbWALTruncateTotal *prometheus.Desc + tsdbWALTruncateDuration *prometheus.Desc + tsdbWALCorruptionsTotal *prometheus.Desc + tsdbWALWritesFailed *prometheus.Desc + tsdbHeadTruncateFail *prometheus.Desc + tsdbHeadTruncateTotal *prometheus.Desc + tsdbHeadGcDuration *prometheus.Desc tsdbActiveAppenders *prometheus.Desc tsdbSeriesNotFound *prometheus.Desc tsdbChunks *prometheus.Desc @@ -252,13 +254,12 @@ type tsdbMetrics struct { memSeriesCreatedTotal *prometheus.Desc memSeriesRemovedTotal *prometheus.Desc - regsMu sync.RWMutex // custom mutex for shipper registry, to avoid blocking main user state mutex on collection - regs map[string]*prometheus.Registry // One prometheus registry per tenant + regs *util.UserRegistries } func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { m := &tsdbMetrics{ - regs: make(map[string]*prometheus.Registry), + regs: util.NewUserRegistries(), dirSyncs: prometheus.NewDesc( "cortex_ingester_shipper_dir_syncs_total", @@ -296,22 +297,38 @@ func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { "cortex_ingester_tsdb_wal_completed_pages_total", "Total number of TSDB WAL completed pages.", nil, nil), - tsdbTruncateFail: prometheus.NewDesc( + tsdbWALTruncateFail: prometheus.NewDesc( "cortex_ingester_tsdb_wal_truncations_failed_total", "Total number of TSDB WAL truncations that failed.", nil, nil), - tsdbTruncateTotal: prometheus.NewDesc( + tsdbWALTruncateTotal: prometheus.NewDesc( "cortex_ingester_tsdb_wal_truncations_total", "Total number of TSDB WAL truncations attempted.", nil, nil), - tsdbTruncateDuration: prometheus.NewDesc( + tsdbWALTruncateDuration: prometheus.NewDesc( "cortex_ingester_tsdb_wal_truncate_duration_seconds", "Duration of TSDB WAL truncation.", nil, nil), - tsdbWritesFailed: prometheus.NewDesc( + tsdbWALCorruptionsTotal: prometheus.NewDesc( + "cortex_ingester_tsdb_wal_corruptions_total", + "Total number of TSDB WAL corruptions.", + nil, nil), + tsdbWALWritesFailed: prometheus.NewDesc( "cortex_ingester_tsdb_wal_writes_failed_total", "Total number of TSDB WAL writes that failed.", nil, nil), + tsdbHeadTruncateFail: prometheus.NewDesc( + "cortex_ingester_tsdb_head_truncations_failed_total", + "Total number of TSDB head truncations that failed.", + nil, nil), + tsdbHeadTruncateTotal: prometheus.NewDesc( + "cortex_ingester_tsdb_head_truncations_total", + "Total number of TSDB head truncations attempted.", + nil, nil), + tsdbHeadGcDuration: prometheus.NewDesc( + "cortex_ingester_tsdb_head_gc_duration_seconds", + "Runtime of garbage collection in the TSDB head.", + nil, nil), tsdbActiveAppenders: prometheus.NewDesc( "cortex_ingester_tsdb_head_active_appenders", "Number of currently active 
TSDB appender transactions.", @@ -374,10 +391,14 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.tsdbFsyncDuration out <- sm.tsdbPageFlushes out <- sm.tsdbPageCompletions - out <- sm.tsdbTruncateFail - out <- sm.tsdbTruncateTotal - out <- sm.tsdbTruncateDuration - out <- sm.tsdbWritesFailed + out <- sm.tsdbWALTruncateFail + out <- sm.tsdbWALTruncateTotal + out <- sm.tsdbWALTruncateDuration + out <- sm.tsdbWALCorruptionsTotal + out <- sm.tsdbWALWritesFailed + out <- sm.tsdbHeadTruncateFail + out <- sm.tsdbHeadTruncateTotal + out <- sm.tsdbHeadGcDuration out <- sm.tsdbActiveAppenders out <- sm.tsdbSeriesNotFound out <- sm.tsdbChunks @@ -391,11 +412,10 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.memSeriesCreatedTotal out <- sm.memSeriesRemovedTotal - } func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { - data := util.BuildMetricFamiliesPerUserFromUserRegistries(sm.registries()) + data := sm.regs.BuildMetricFamiliesPerUser() // OK, we have it all. Let's build results. data.SendSumOfCounters(out, sm.dirSyncs, "thanos_shipper_dir_syncs_total") @@ -408,10 +428,14 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfSummaries(out, sm.tsdbFsyncDuration, "prometheus_tsdb_wal_fsync_duration_seconds") data.SendSumOfCounters(out, sm.tsdbPageFlushes, "prometheus_tsdb_wal_page_flushes_total") data.SendSumOfCounters(out, sm.tsdbPageCompletions, "prometheus_tsdb_wal_completed_pages_total") - data.SendSumOfCounters(out, sm.tsdbTruncateFail, "prometheus_tsdb_wal_truncations_failed_total") - data.SendSumOfCounters(out, sm.tsdbTruncateTotal, "prometheus_tsdb_wal_truncations_total") - data.SendSumOfSummaries(out, sm.tsdbTruncateDuration, "prometheus_tsdb_wal_truncate_duration_seconds") - data.SendSumOfCounters(out, sm.tsdbWritesFailed, "prometheus_tsdb_wal_writes_failed_total") + data.SendSumOfCounters(out, sm.tsdbWALTruncateFail, "prometheus_tsdb_wal_truncations_failed_total") + data.SendSumOfCounters(out, sm.tsdbWALTruncateTotal, "prometheus_tsdb_wal_truncations_total") + data.SendSumOfSummaries(out, sm.tsdbWALTruncateDuration, "prometheus_tsdb_wal_truncate_duration_seconds") + data.SendSumOfCounters(out, sm.tsdbWALCorruptionsTotal, "prometheus_tsdb_wal_corruptions_total") + data.SendSumOfCounters(out, sm.tsdbWALWritesFailed, "prometheus_tsdb_wal_writes_failed_total") + data.SendSumOfCounters(out, sm.tsdbHeadTruncateFail, "prometheus_tsdb_head_truncations_failed_total") + data.SendSumOfCounters(out, sm.tsdbHeadTruncateTotal, "prometheus_tsdb_head_truncations_total") + data.SendSumOfSummaries(out, sm.tsdbHeadGcDuration, "prometheus_tsdb_head_gc_duration_seconds") data.SendSumOfGauges(out, sm.tsdbActiveAppenders, "prometheus_tsdb_head_active_appenders") data.SendSumOfCounters(out, sm.tsdbSeriesNotFound, "prometheus_tsdb_head_series_not_found_total") data.SendSumOfGauges(out, sm.tsdbChunks, "prometheus_tsdb_head_chunks") @@ -427,20 +451,10 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCountersPerUser(out, sm.memSeriesRemovedTotal, "prometheus_tsdb_head_series_removed_total") } -// make a copy of the map, so that metrics can be gathered while the new registry is being added. 
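For context on the metrics changes above: tsdbMetrics is a hand-written Prometheus collector, so renaming or adding descriptors means updating both Describe and Collect, while the per-tenant sources now live in util.UserRegistries instead of a locally guarded map. A stripped-down, runnable sketch of that Describe/Collect shape, with a constant standing in for the per-tenant aggregation:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    // aggCollector only holds descriptors; values are produced at scrape time.
    type aggCollector struct {
        dirSyncs *prometheus.Desc
        // In the real collector this value is summed from per-tenant registries;
        // a constant keeps the sketch self-contained.
        value float64
    }

    func (c *aggCollector) Describe(out chan<- *prometheus.Desc) {
        out <- c.dirSyncs
    }

    func (c *aggCollector) Collect(out chan<- prometheus.Metric) {
        out <- prometheus.MustNewConstMetric(c.dirSyncs, prometheus.CounterValue, c.value)
    }

    func main() {
        c := &aggCollector{
            dirSyncs: prometheus.NewDesc(
                "example_shipper_dir_syncs_total",
                "Total number of dir sync attempts.",
                nil, nil),
            value: 42,
        }

        reg := prometheus.NewRegistry()
        reg.MustRegister(c)

        mfs, err := reg.Gather()
        if err != nil {
            panic(err)
        }
        fmt.Println("metric families:", len(mfs))
    }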
-func (sm *tsdbMetrics) registries() map[string]*prometheus.Registry { - sm.regsMu.RLock() - defer sm.regsMu.RUnlock() - - regs := make(map[string]*prometheus.Registry, len(sm.regs)) - for u, r := range sm.regs { - regs[u] = r - } - return regs +func (sm *tsdbMetrics) setRegistryForUser(userID string, registry *prometheus.Registry) { + sm.regs.AddUserRegistry(userID, registry) } -func (sm *tsdbMetrics) setRegistryForUser(userID string, registry *prometheus.Registry) { - sm.regsMu.Lock() - sm.regs[userID] = registry - sm.regsMu.Unlock() +func (sm *tsdbMetrics) removeRegistryForUser(userID string) { + sm.regs.RemoveUserRegistry(userID, false) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go index 71044e8fa903d..9e6835d585d41 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go @@ -14,10 +14,10 @@ import ( tsdb_record "github.com/prometheus/prometheus/tsdb/record" "github.com/segmentio/fasthash/fnv1a" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ingester/index" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -172,7 +172,7 @@ func (us *userStates) teardown() { } func (us *userStates) getViaContext(ctx context.Context) (*userState, bool, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, false, fmt.Errorf("no user id") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go index 244c763d28fa5..8ec6b6abd9f5f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go @@ -445,7 +445,7 @@ func (w *walWrapper) deleteCheckpoints(maxIndex int) (err error) { } }() - var errs tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() files, err := ioutil.ReadDir(w.wal.Dir()) if err != nil { @@ -795,11 +795,8 @@ func processWALWithRepair(startSegment int, userStates *userStates, params walRe if err != nil { level.Error(util.Logger).Log("msg", "error in repairing WAL", "err", err) } - var multiErr tsdb_errors.MultiError - multiErr.Add(err) - multiErr.Add(w.Close()) - return multiErr.Err() + return tsdb_errors.NewMulti(err, w.Close()).Err() } // processWAL processes the records in the WAL concurrently. diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/block_meta.go b/vendor/github.com/cortexproject/cortex/pkg/querier/block_meta.go deleted file mode 100644 index ccce0ab2403de..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/block_meta.go +++ /dev/null @@ -1,52 +0,0 @@ -package querier - -import ( - "fmt" - "strings" - "time" - - "github.com/oklog/ulid" - "github.com/thanos-io/thanos/pkg/block/metadata" - - "github.com/cortexproject/cortex/pkg/util" -) - -// BlockMeta is a struct extending the Thanos block metadata and adding -// Cortex-specific data too. -type BlockMeta struct { - metadata.Meta - - // UploadedAt is the timestamp when the block has been completed to be uploaded - // to the storage. 
- UploadedAt time.Time -} - -func (m BlockMeta) String() string { - minT := util.TimeFromMillis(m.MinTime).UTC() - maxT := util.TimeFromMillis(m.MaxTime).UTC() - - return fmt.Sprintf("%s (min time: %s max time: %s)", m.ULID, minT.String(), maxT.String()) -} - -type BlockMetas []*BlockMeta - -func (s BlockMetas) String() string { - b := strings.Builder{} - - for idx, m := range s { - if idx > 0 { - b.WriteString(", ") - } - b.WriteString(m.String()) - } - - return b.String() -} - -func getULIDsFromBlockMetas(metas []*BlockMeta) []ulid.ULID { - ids := make([]ulid.ULID, len(metas)) - for i, m := range metas { - ids[i] = m.ULID - } - return ids -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go index 6edebf057cd12..b0de409110c0e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_consistency_checker.go @@ -8,7 +8,8 @@ import ( "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/block/metadata" + + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" ) type BlocksConsistencyChecker struct { @@ -36,7 +37,7 @@ func NewBlocksConsistencyChecker(uploadGracePeriod, deletionGracePeriod time.Dur } } -func (c *BlocksConsistencyChecker) Check(knownBlocks []*BlockMeta, knownDeletionMarks map[ulid.ULID]*metadata.DeletionMark, queriedBlocks []ulid.ULID) (missingBlocks []ulid.ULID) { +func (c *BlocksConsistencyChecker) Check(knownBlocks bucketindex.Blocks, knownDeletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, queriedBlocks []ulid.ULID) (missingBlocks []ulid.ULID) { c.checksTotal.Inc() // Reverse the map of queried blocks, so that we can easily look for missing ones. @@ -46,7 +47,7 @@ func (c *BlocksConsistencyChecker) Check(knownBlocks []*BlockMeta, knownDeletion } // Look for any missing block. - for _, meta := range knownBlocks { + for _, block := range knownBlocks { // Some recently uploaded blocks, already discovered by the querier, may not have been discovered // and loaded by the store-gateway yet. In order to avoid false positives, we grant some time // to the store-gateway to discover them. It's safe to exclude recently uploaded blocks because: @@ -54,8 +55,8 @@ func (c *BlocksConsistencyChecker) Check(knownBlocks []*BlockMeta, knownDeletion // on the configured retention period). // - Blocks uploaded by compactor: the source blocks are marked for deletion but will continue to be // queried by queriers for a while (depends on the configured deletion marks delay). - if c.uploadGracePeriod > 0 && time.Since(meta.UploadedAt) < c.uploadGracePeriod { - level.Debug(c.logger).Log("msg", "block skipped from consistency check because it was uploaded recently", "block", meta.ULID.String(), "uploadedAt", meta.UploadedAt.String()) + if c.uploadGracePeriod > 0 && time.Since(block.GetUploadedAt()) < c.uploadGracePeriod { + level.Debug(c.logger).Log("msg", "block skipped from consistency check because it was uploaded recently", "block", block.ID.String(), "uploadedAt", block.GetUploadedAt().String()) continue } @@ -63,17 +64,17 @@ func (c *BlocksConsistencyChecker) Check(knownBlocks []*BlockMeta, knownDeletion // on blocks that can't be queried because they were offloaded. 
For this reason, we don't run the consistency check on any block // which has been marked for deletion more then "grace period" time ago. Basically, the grace period is the time // we still expect a block marked for deletion to be still queried. - if mark := knownDeletionMarks[meta.ULID]; mark != nil { + if mark := knownDeletionMarks[block.ID]; mark != nil { deletionTime := time.Unix(mark.DeletionTime, 0) if c.deletionGracePeriod > 0 && time.Since(deletionTime) > c.deletionGracePeriod { - level.Debug(c.logger).Log("msg", "block skipped from consistency check because it is marked for deletion", "block", meta.ULID.String(), "deletionTime", deletionTime.String()) + level.Debug(c.logger).Log("msg", "block skipped from consistency check because it is marked for deletion", "block", block.ID.String(), "deletionTime", deletionTime.String()) continue } } - if _, ok := actualBlocks[meta.ULID]; !ok { - missingBlocks = append(missingBlocks, meta.ULID) + if _, ok := actualBlocks[block.ID]; !ok { + missingBlocks = append(missingBlocks, block.ID) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go index 437dec2d55a14..5d2fb56083ffe 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go @@ -20,7 +20,9 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/objstore" + "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" @@ -47,6 +49,7 @@ type BlocksScanner struct { logger log.Logger bucketClient objstore.Bucket fetchersMetrics *storegateway.MetadataFetcherMetrics + usersScanner *cortex_tsdb.UsersScanner // We reuse the metadata fetcher instance for a given tenant both because of performance // reasons (the fetcher keeps a in-memory cache) and being able to collect and group metrics. @@ -55,9 +58,9 @@ type BlocksScanner struct { // Keep the per-tenant/user metas found during the last run. 
userMx sync.RWMutex - userMetas map[string][]*BlockMeta - userMetasLookup map[string]map[ulid.ULID]*BlockMeta - userDeletionMarks map[string]map[ulid.ULID]*metadata.DeletionMark + userMetas map[string]bucketindex.Blocks + userMetasLookup map[string]map[ulid.ULID]*bucketindex.Block + userDeletionMarks map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark scanDuration prometheus.Histogram scanLastSuccess prometheus.Gauge @@ -69,9 +72,10 @@ func NewBlocksScanner(cfg BlocksScannerConfig, bucketClient objstore.Bucket, log logger: logger, bucketClient: bucketClient, fetchers: make(map[string]userFetcher), - userMetas: make(map[string][]*BlockMeta), - userMetasLookup: make(map[string]map[ulid.ULID]*BlockMeta), - userDeletionMarks: map[string]map[ulid.ULID]*metadata.DeletionMark{}, + usersScanner: cortex_tsdb.NewUsersScanner(bucketClient, cortex_tsdb.AllUsers, logger), + userMetas: make(map[string]bucketindex.Blocks), + userMetasLookup: make(map[string]map[ulid.ULID]*bucketindex.Block), + userDeletionMarks: map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark{}, fetchersMetrics: storegateway.NewMetadataFetcherMetrics(), scanDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_querier_blocks_scan_duration_seconds", @@ -98,7 +102,7 @@ func NewBlocksScanner(cfg BlocksScannerConfig, bucketClient objstore.Bucket, log // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. -func (d *BlocksScanner) GetBlocks(userID string, minT, maxT int64) ([]*BlockMeta, map[ulid.ULID]*metadata.DeletionMark, error) { +func (d *BlocksScanner) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { // We need to ensure the initial full bucket scan succeeded. if d.State() != services.Running { return nil, nil, errBlocksScannerNotRunning @@ -117,7 +121,7 @@ func (d *BlocksScanner) GetBlocks(userID string, minT, maxT int64) ([]*BlockMeta // Given we do expect the large majority of queries to have a time range close // to "now", we're going to find matching blocks iterating the list in reverse order. - var matchingMetas []*BlockMeta + var matchingMetas bucketindex.Blocks for i := len(userMetas) - 1; i >= 0; i-- { // NOTE: Block intervals are half-open: [MinTime, MaxTime). if userMetas[i].MinTime <= maxT && minT < userMetas[i].MaxTime { @@ -131,11 +135,11 @@ func (d *BlocksScanner) GetBlocks(userID string, minT, maxT int64) ([]*BlockMeta } // Filter deletion marks by matching blocks only. - matchingDeletionMarks := map[ulid.ULID]*metadata.DeletionMark{} + matchingDeletionMarks := map[ulid.ULID]*bucketindex.BlockDeletionMark{} if userDeletionMarks, ok := d.userDeletionMarks[userID]; ok { for _, m := range matchingMetas { - if d := userDeletionMarks[m.ULID]; d != nil { - matchingDeletionMarks[m.ULID] = d + if d := userDeletionMarks[m.ID]; d != nil { + matchingDeletionMarks[m.ID] = d } } } @@ -171,12 +175,18 @@ func (d *BlocksScanner) scanBucket(ctx context.Context) (returnErr error) { } }(time.Now()) + // Discover all users first. This helps cacheability of the object store call. 
+ userIDs, _, err := d.usersScanner.ScanUsers(ctx) + if err != nil { + return err + } + jobsChan := make(chan string) resMx := sync.Mutex{} - resMetas := map[string][]*BlockMeta{} - resMetasLookup := map[string]map[ulid.ULID]*BlockMeta{} - resDeletionMarks := map[string]map[ulid.ULID]*metadata.DeletionMark{} - resErrs := tsdb_errors.MultiError{} + resMetas := map[string]bucketindex.Blocks{} + resMetasLookup := map[string]map[ulid.ULID]*bucketindex.Block{} + resDeletionMarks := map[string]map[ulid.ULID]*bucketindex.BlockDeletionMark{} + resErrs := tsdb_errors.NewMulti() // Create a pool of workers which will synchronize metas. The pool size // is limited in order to avoid to concurrently sync a lot of tenants in @@ -192,9 +202,9 @@ func (d *BlocksScanner) scanBucket(ctx context.Context) (returnErr error) { metas, deletionMarks, err := d.scanUserBlocksWithRetries(ctx, userID) // Build the lookup map. - lookup := map[ulid.ULID]*BlockMeta{} + lookup := map[ulid.ULID]*bucketindex.Block{} for _, m := range metas { - lookup[m.ULID] = m + lookup[m.ID] = m } resMx.Lock() @@ -210,21 +220,18 @@ func (d *BlocksScanner) scanBucket(ctx context.Context) (returnErr error) { }() } - // Iterate the bucket to discover users. - err := d.bucketClient.Iter(ctx, "", func(s string) error { - userID := strings.TrimSuffix(s, "/") + // Push a job for each user whose blocks need to be discovered. +pushJobsLoop: + for _, userID := range userIDs { select { case jobsChan <- userID: - return nil + // Nothing to do. case <-ctx.Done(): - return ctx.Err() + resMx.Lock() + resErrs.Add(ctx.Err()) + resMx.Unlock() + break pushJobsLoop } - }) - - if err != nil { - resMx.Lock() - resErrs.Add(err) - resMx.Unlock() } // Wait until all workers completed. @@ -259,7 +266,7 @@ func (d *BlocksScanner) scanBucket(ctx context.Context) (returnErr error) { // scanUserBlocksWithRetries runs scanUserBlocks() retrying multiple times // in case of error. -func (d *BlocksScanner) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas []*BlockMeta, deletionMarks map[ulid.ULID]*metadata.DeletionMark, err error) { +func (d *BlocksScanner) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas bucketindex.Blocks, deletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, err error) { retries := util.NewBackoff(ctx, util.BackoffConfig{ MinBackoff: time.Second, MaxBackoff: 30 * time.Second, @@ -278,7 +285,7 @@ func (d *BlocksScanner) scanUserBlocksWithRetries(ctx context.Context, userID st return } -func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) ([]*BlockMeta, map[ulid.ULID]*metadata.DeletionMark, error) { +func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { fetcher, userBucket, deletionMarkFilter, err := d.getOrCreateMetaFetcher(userID) if err != nil { return nil, nil, errors.Wrapf(err, "create meta fetcher for user %s", userID) @@ -295,11 +302,9 @@ func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) ([]*B logPartialBlocks(userID, partials, d.logger) } - res := make([]*BlockMeta, 0, len(metas)) + res := make(bucketindex.Blocks, 0, len(metas)) for _, m := range metas { - blockMeta := &BlockMeta{ - Meta: *m, - } + blockMeta := bucketindex.BlockFromThanosMeta(*m) // If the block is already known, we can get the remaining attributes from there // because a block is immutable. 
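Several hunks above (deleteCheckpoints, processWALWithRepair, and the blocks scanner) move from a zero-value MultiError to the tsdb_errors.NewMulti constructor, which accepts initial errors and silently drops nils. A tiny, runnable sketch of how that accumulator is used, assuming the same Prometheus tsdb/errors package:

    package main

    import (
        "errors"
        "fmt"

        tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
    )

    func main() {
        // Accumulate errors from several independent steps; nil errors are ignored.
        errs := tsdb_errors.NewMulti()
        errs.Add(nil)
        errs.Add(errors.New("first failure"))
        errs.Add(errors.New("second failure"))

        if err := errs.Err(); err != nil {
            fmt.Println("combined:", err)
        }

        // The constructor form combines an error with a cleanup result in one line,
        // which is the shape used in processWALWithRepair above.
        var closeErr error
        fmt.Println(tsdb_errors.NewMulti(errors.New("repair failed"), closeErr).Err())
    }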
@@ -315,7 +320,7 @@ func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) ([]*B // Since the meta.json file is the last file of a block being uploaded and it's immutable // we can safely assume that the last modified timestamp of the meta.json is the time when // the block has completed to be uploaded. - blockMeta.UploadedAt = attrs.LastModified + blockMeta.UploadedAt = attrs.LastModified.Unix() } res = append(res, blockMeta) @@ -324,7 +329,13 @@ func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) ([]*B // The blocks scanner expects all blocks to be sorted by max time. sortBlockMetasByMaxTime(res) - return res, deletionMarkFilter.DeletionMarkBlocks(), nil + // Convert deletion marks to our onw data type. + marks := map[ulid.ULID]*bucketindex.BlockDeletionMark{} + for id, m := range deletionMarkFilter.DeletionMarkBlocks() { + marks[id] = bucketindex.BlockDeletionMarkFromThanosMarker(m) + } + + return res, marks, nil } func (d *BlocksScanner) getOrCreateMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) { @@ -351,7 +362,7 @@ func (d *BlocksScanner) getOrCreateMetaFetcher(userID string) (block.MetadataFet func (d *BlocksScanner) createMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) { userLogger := util.WithUserID(userID, d.logger) - userBucket := cortex_tsdb.NewUserBucketClient(userID, d.bucketClient) + userBucket := bucket.NewUserBucketClient(userID, d.bucketClient) userReg := prometheus.NewRegistry() // The following filters have been intentionally omitted: @@ -360,7 +371,7 @@ func (d *BlocksScanner) createMetaFetcher(userID string) (block.MetadataFetcher, // - Deduplicate filter: omitted because it could cause troubles with the consistency check if // we "hide" source blocks because recently compacted by the compactor before the store-gateway instances // discover and load the compacted ones. 
- deletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, d.cfg.IgnoreDeletionMarksDelay) + deletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, d.cfg.IgnoreDeletionMarksDelay, d.cfg.MetasConcurrency) filters := []block.MetadataFilter{deletionMarkFilter} f, err := block.NewMetaFetcher( @@ -381,7 +392,7 @@ func (d *BlocksScanner) createMetaFetcher(userID string) (block.MetadataFetcher, return f, userBucket, deletionMarkFilter, nil } -func (d *BlocksScanner) getBlockMeta(userID string, blockID ulid.ULID) *BlockMeta { +func (d *BlocksScanner) getBlockMeta(userID string, blockID ulid.ULID) *bucketindex.Block { d.userMx.RLock() defer d.userMx.RUnlock() @@ -393,9 +404,9 @@ func (d *BlocksScanner) getBlockMeta(userID string, blockID ulid.ULID) *BlockMet return metas[blockID] } -func sortBlockMetasByMaxTime(metas []*BlockMeta) { - sort.Slice(metas, func(i, j int) bool { - return metas[i].MaxTime < metas[j].MaxTime +func sortBlockMetasByMaxTime(blocks bucketindex.Blocks) { + sort.Slice(blocks, func(i, j int) bool { + return blocks[i].MaxTime < blocks[j].MaxTime }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go index 22ebaa285797d..f60ec7182273f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "sort" "strings" "sync" "time" @@ -18,11 +19,10 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/user" + "github.com/thanos-io/thanos/pkg/strutil" "go.uber.org/atomic" "golang.org/x/sync/errgroup" grpc_metadata "google.golang.org/grpc/metadata" @@ -30,9 +30,12 @@ import ( "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -66,7 +69,7 @@ type BlocksFinder interface { // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. 
- GetBlocks(userID string, minT, maxT int64) ([]*BlockMeta, map[ulid.ULID]*metadata.DeletionMark, error) + GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) } // BlocksStoreClient is the interface that should be implemented by any client used @@ -151,7 +154,7 @@ func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consist func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { var stores BlocksStoreSet - bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), storageCfg.Bucket, "querier", logger, reg) + bucketClient, err := bucket.NewClient(context.Background(), storageCfg.Bucket, "querier", logger, reg) if err != nil { return nil, errors.Wrap(err, "failed to create bucket client") } @@ -159,16 +162,15 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa // Blocks scanner doesn't use chunks, but we pass config for consistency. cachingBucket, err := cortex_tsdb.CreateCachingBucket(storageCfg.BucketStore.ChunksCache, storageCfg.BucketStore.MetadataCache, bucketClient, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "querier"}, reg)) if err != nil { - return nil, errors.Wrapf(err, "create caching bucket") + return nil, errors.Wrap(err, "create caching bucket") } bucketClient = cachingBucket scanner := NewBlocksScanner(BlocksScannerConfig{ ScanInterval: storageCfg.BucketStore.SyncInterval, TenantsConcurrency: storageCfg.BucketStore.TenantSyncConcurrency, - MetasConcurrency: storageCfg.BucketStore.BlockSyncConcurrency, + MetasConcurrency: storageCfg.BucketStore.MetaSyncConcurrency, CacheDir: storageCfg.BucketStore.SyncDir, - ConsistencyDelay: storageCfg.BucketStore.ConsistencyDelay, IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay, }, bucketClient, logger, reg) @@ -250,7 +252,7 @@ func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (s return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s) } - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } @@ -292,14 +294,73 @@ func (q *blocksStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers .. return q.selectSorted(sp, matchers...) } -func (q *blocksStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { - // Cortex doesn't use this. It will ask ingesters for metadata. - return nil, nil, errors.New("not implemented") +func (q *blocksStoreQuerier) LabelNames() ([]string, storage.Warnings, error) { + spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.LabelNames") + defer spanLog.Span.Finish() + + minT, maxT := q.minT, q.maxT + + var ( + resMtx sync.Mutex + resNameSets = [][]string{} + resWarnings = storage.Warnings(nil) + ) + + queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { + nameSets, warnings, queriedBlocks, err := q.fetchLabelNamesFromStore(spanCtx, clients, minT, maxT) + if err != nil { + return nil, err + } + + resMtx.Lock() + resNameSets = append(resNameSets, nameSets...) + resWarnings = append(resWarnings, warnings...) 
+ resMtx.Unlock() + + return queriedBlocks, nil + } + + err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) + if err != nil { + return nil, nil, err + } + + return strutil.MergeSlices(resNameSets...), resWarnings, nil } -func (q *blocksStoreQuerier) LabelNames() ([]string, storage.Warnings, error) { - // Cortex doesn't use this. It will ask ingesters for metadata. - return nil, nil, errors.New("not implemented") +func (q *blocksStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + spanLog, spanCtx := spanlogger.New(q.ctx, "blocksStoreQuerier.LabelValues") + defer spanLog.Span.Finish() + + minT, maxT := q.minT, q.maxT + + var ( + resValueSets = [][]string{} + resWarnings = storage.Warnings(nil) + + resultMtx sync.Mutex + ) + + queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { + valueSets, warnings, queriedBlocks, err := q.fetchLabelValuesFromStore(spanCtx, name, clients, minT, maxT) + if err != nil { + return nil, err + } + + resultMtx.Lock() + resValueSets = append(resValueSets, valueSets...) + resWarnings = append(resWarnings, warnings...) + resultMtx.Unlock() + + return queriedBlocks, nil + } + + err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) + if err != nil { + return nil, nil, err + } + + return strutil.MergeSlices(resValueSets...), resWarnings, nil } func (q *blocksStoreQuerier) Close() error { @@ -315,6 +376,55 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* minT, maxT = sp.Start, sp.End } + var ( + convertedMatchers = convertMatchersToLabelMatcher(matchers) + resSeriesSets = []storage.SeriesSet(nil) + resWarnings = storage.Warnings(nil) + + maxChunksLimit = q.limits.MaxChunksPerQuery(q.userID) + leftChunksLimit = maxChunksLimit + + resultMtx sync.Mutex + ) + + queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { + seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, matchers, convertedMatchers, maxChunksLimit, leftChunksLimit) + if err != nil { + return nil, err + } + + resultMtx.Lock() + + resSeriesSets = append(resSeriesSets, seriesSets...) + resWarnings = append(resWarnings, warnings...) + + // Given a single block is guaranteed to not be queried twice, we can safely decrease the number of + // chunks we can still read before hitting the limit (max == 0 means disabled). + if maxChunksLimit > 0 { + leftChunksLimit -= numChunks + } + + resultMtx.Unlock() + + return queriedBlocks, nil + } + + err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, queryFunc) + if err != nil { + return storage.ErrSeriesSet(err) + } + + if len(resSeriesSets) == 0 { + storage.EmptySeriesSet() + } + + return series.NewSeriesSetWithWarnings( + storage.NewMergeSeriesSet(resSeriesSets, storage.ChainedSeriesMerge), + resWarnings) +} + +func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, + queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error)) error { // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. 
This // optimization is particularly important for the blocks storage because can be used to skip @@ -325,43 +435,37 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* maxT = util.Min64(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter))) if origMaxT != maxT { - level.Debug(spanLog).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT) + level.Debug(logger).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT) } if maxT < minT { q.metrics.storesHit.Observe(0) - level.Debug(spanLog).Log("msg", "empty query time range after max time manipulation") - return storage.EmptySeriesSet() + level.Debug(logger).Log("msg", "empty query time range after max time manipulation") + return nil } } // Find the list of blocks we need to query given the time range. - knownMetas, knownDeletionMarks, err := q.finder.GetBlocks(q.userID, minT, maxT) + knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, q.userID, minT, maxT) if err != nil { - return storage.ErrSeriesSet(err) + return err } - if len(knownMetas) == 0 { + if len(knownBlocks) == 0 { q.metrics.storesHit.Observe(0) - level.Debug(spanLog).Log("msg", "no blocks found") - return storage.EmptySeriesSet() + level.Debug(logger).Log("msg", "no blocks found") + return nil } - level.Debug(spanLog).Log("msg", "found blocks to query", "expected", BlockMetas(knownMetas).String()) + level.Debug(logger).Log("msg", "found blocks to query", "expected", knownBlocks.String()) var ( // At the beginning the list of blocks to query are all known blocks. - remainingBlocks = getULIDsFromBlockMetas(knownMetas) + remainingBlocks = knownBlocks.GetULIDs() attemptedBlocks = map[ulid.ULID][]string{} touchedStores = map[string]struct{}{} - convertedMatchers = convertMatchersToLabelMatcher(matchers) - resSeriesSets = []storage.SeriesSet(nil) - resWarnings = storage.Warnings(nil) - resQueriedBlocks = []ulid.ULID(nil) - - maxChunksLimit = q.limits.MaxChunksPerQuery(q.userID) - leftChunksLimit = maxChunksLimit + resQueriedBlocks = []ulid.ULID(nil) ) for attempt := 1; attempt <= maxFetchSeriesAttempts; attempt++ { @@ -372,32 +476,24 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* // If it's a retry and we get an error, it means there are no more store-gateways left // from which running another attempt, so we're just stopping retrying. if attempt > 1 { - level.Warn(spanLog).Log("msg", "unable to get store-gateway clients while retrying to fetch missing blocks", "err", err) + level.Warn(logger).Log("msg", "unable to get store-gateway clients while retrying to fetch missing blocks", "err", err) break } - return storage.ErrSeriesSet(err) + return err } - level.Debug(spanLog).Log("msg", "found store-gateway instances to query", "num instances", len(clients), "attempt", attempt) + level.Debug(logger).Log("msg", "found store-gateway instances to query", "num instances", len(clients), "attempt", attempt) // Fetch series from stores. If an error occur we do not retry because retries // are only meant to cover missing blocks. 
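In the refactor above, Select, LabelNames, and LabelValues all delegate to queryWithConsistencyCheck and pass in a closure; the closure appends its partial results into locals guarded by a mutex, while the shared function owns retries, store-gateway selection, and the consistency check. A minimal, runnable sketch of that callback shape:

    package main

    import (
        "fmt"
        "sync"
    )

    // runWithRetries stands in for queryWithConsistencyCheck: it owns the retry loop
    // and reports only success or failure, leaving result handling to the caller.
    func runWithRetries(queryFunc func(attempt int) error) error {
        for attempt := 1; attempt <= 3; attempt++ {
            if err := queryFunc(attempt); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        var (
            resultMtx sync.Mutex
            results   []string
        )

        // The closure captures the result slice and mutex, mirroring how the querier
        // aggregates series sets, label names, or label values per call site.
        queryFunc := func(attempt int) error {
            resultMtx.Lock()
            defer resultMtx.Unlock()

            results = append(results, fmt.Sprintf("partial result from attempt %d", attempt))
            return nil
        }

        if err := runWithRetries(queryFunc); err != nil {
            fmt.Println("query failed:", err)
            return
        }
        fmt.Println("collected:", results)
    }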
- seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, clients, minT, maxT, matchers, convertedMatchers, maxChunksLimit, leftChunksLimit) + queriedBlocks, err := queryFunc(clients, minT, maxT) if err != nil { - return storage.ErrSeriesSet(err) + return err } - level.Debug(spanLog).Log("msg", "received series from all store-gateways", "queried blocks", strings.Join(convertULIDsToString(queriedBlocks), " ")) + level.Debug(logger).Log("msg", "received series from all store-gateways", "queried blocks", strings.Join(convertULIDsToString(queriedBlocks), " ")) - resSeriesSets = append(resSeriesSets, seriesSets...) - resWarnings = append(resWarnings, warnings...) resQueriedBlocks = append(resQueriedBlocks, queriedBlocks...) - // Given a single block is guaranteed to not be queried twice, we can safely decrease the number of - // chunks we can still read before hitting the limit (max == 0 means disabled). - if maxChunksLimit > 0 { - leftChunksLimit -= numChunks - } - // Update the map of blocks we attempted to query. for client, blockIDs := range clients { touchedStores[client.RemoteAddress()] = struct{}{} @@ -408,31 +504,28 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } // Ensure all expected blocks have been queried (during all tries done so far). - missingBlocks := q.consistency.Check(knownMetas, knownDeletionMarks, resQueriedBlocks) + missingBlocks := q.consistency.Check(knownBlocks, knownDeletionMarks, resQueriedBlocks) if len(missingBlocks) == 0 { q.metrics.storesHit.Observe(float64(len(touchedStores))) q.metrics.refetches.Observe(float64(attempt - 1)) - return series.NewSeriesSetWithWarnings( - storage.NewMergeSeriesSet(resSeriesSets, storage.ChainedSeriesMerge), - resWarnings) + return nil } - level.Debug(spanLog).Log("msg", "consistency check failed", "attempt", attempt, "missing blocks", strings.Join(convertULIDsToString(missingBlocks), " ")) + level.Debug(logger).Log("msg", "consistency check failed", "attempt", attempt, "missing blocks", strings.Join(convertULIDsToString(missingBlocks), " ")) // The next attempt should just query the missing blocks. remainingBlocks = missingBlocks } // We've not been able to query all expected blocks after all retries. - err = fmt.Errorf("consistency check failed because some blocks were not queried: %s", strings.Join(convertULIDsToString(remainingBlocks), " ")) - level.Warn(util.WithContext(spanCtx, spanLog)).Log("msg", "failed consistency check", "err", err) - - return storage.ErrSeriesSet(err) + level.Warn(util.WithContext(ctx, logger)).Log("msg", "failed consistency check", "err", err) + return fmt.Errorf("consistency check failed because some blocks were not queried: %s", strings.Join(convertULIDsToString(remainingBlocks), " ")) } func (q *blocksStoreQuerier) fetchSeriesFromStores( ctx context.Context, + sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, @@ -459,14 +552,20 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( blockIDs := blockIDs g.Go(func() error { - req, err := createSeriesRequest(minT, maxT, convertedMatchers, blockIDs) + // See: https://github.com/prometheus/prometheus/pull/8050 + // TODO(goutham): we should ideally be passing the hints down to the storage layer + // and let the TSDB return us data with no chunks as in prometheus#8050. + // But this is an acceptable workaround for now. 
+ skipChunks := sp != nil && sp.Func == "series" + + req, err := createSeriesRequest(minT, maxT, convertedMatchers, skipChunks, blockIDs) if err != nil { return errors.Wrapf(err, "failed to create series request") } stream, err := c.Series(gCtx, req) if err != nil { - return errors.Wrapf(err, "failed to fetch series from %s", c) + return errors.Wrapf(err, "failed to fetch series from %s", c.RemoteAddress()) } mySeries := []*storepb.Series(nil) @@ -485,7 +584,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( break } if err != nil { - return errors.Wrapf(err, "failed to receive series from %s", c) + return errors.Wrapf(err, "failed to receive series from %s", c.RemoteAddress()) } // Response may either contain series, warning or hints. @@ -508,7 +607,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( if h := resp.GetHints(); h != nil { hints := hintspb.SeriesResponseHints{} if err := types.UnmarshalAny(h, &hints); err != nil { - return errors.Wrapf(err, "failed to unmarshal hints from %s", c) + return errors.Wrapf(err, "failed to unmarshal series hints from %s", c.RemoteAddress()) } ids, err := convertBlockHintsToULIDs(hints.QueriedBlocks) @@ -521,7 +620,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( } level.Debug(spanLog).Log("msg", "received series from store-gateway", - "instance", c, + "instance", c.RemoteAddress(), "num series", len(mySeries), "bytes series", countSeriesBytes(mySeries), "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), @@ -546,7 +645,161 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil } -func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { +func (q *blocksStoreQuerier) fetchLabelNamesFromStore( + ctx context.Context, + clients map[BlocksStoreClient][]ulid.ULID, + minT int64, + maxT int64, +) ([][]string, storage.Warnings, []ulid.ULID, error) { + var ( + reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) + g, gCtx = errgroup.WithContext(reqCtx) + mtx = sync.Mutex{} + nameSets = [][]string{} + warnings = storage.Warnings(nil) + queriedBlocks = []ulid.ULID(nil) + spanLog = spanlogger.FromContext(ctx) + ) + + // Concurrently fetch series from all clients. + for c, blockIDs := range clients { + // Change variables scope since it will be used in a goroutine. 
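// Editor's aside (not part of the patch): the `c := c` / `blockIDs := blockIDs` lines
// below re-declare the range variables so each goroutine captures its own copy; before
// Go 1.22 the loop variables are reused across iterations, so capturing them directly
// would make every goroutine see the last client. A minimal sketch of the same fan-out
// shape (errgroup plus mutex-protected aggregation), with hypothetical client names:
package main

import (
	"context"
	"fmt"
	"sort"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	clients := map[string][]string{ // store-gateway address -> block IDs (made up)
		"store-1:9095": {"block-A", "block-B"},
		"store-2:9095": {"block-C"},
	}

	g, _ := errgroup.WithContext(context.Background())

	var (
		mtx     sync.Mutex
		queried []string
	)

	for addr, blocks := range clients {
		// Copy the loop variables so the goroutine below uses this iteration's values.
		addr := addr
		blocks := blocks

		g.Go(func() error {
			// In the real code this is a gRPC call to the store-gateway; here we just
			// record which blocks "addr" was asked for, under the shared mutex.
			mtx.Lock()
			defer mtx.Unlock()
			for _, b := range blocks {
				queried = append(queried, addr+"/"+b)
			}
			return nil
		})
	}

	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	sort.Strings(queried)
	fmt.Println(queried)
}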
+ c := c + blockIDs := blockIDs + + g.Go(func() error { + req, err := createLabelNamesRequest(minT, maxT, blockIDs) + if err != nil { + return errors.Wrapf(err, "failed to create label names request") + } + + namesResp, err := c.LabelNames(gCtx, req) + if err != nil { + return errors.Wrapf(err, "failed to fetch series from %s", c.RemoteAddress()) + } + + myQueriedBlocks := []ulid.ULID(nil) + if namesResp.Hints != nil { + hints := hintspb.LabelNamesResponseHints{} + if err := types.UnmarshalAny(namesResp.Hints, &hints); err != nil { + return errors.Wrapf(err, "failed to unmarshal label names hints from %s", c.RemoteAddress()) + } + + ids, err := convertBlockHintsToULIDs(hints.QueriedBlocks) + if err != nil { + return errors.Wrapf(err, "failed to parse queried block IDs from received hints") + } + + myQueriedBlocks = ids + } + + level.Debug(spanLog).Log("msg", "received label names from store-gateway", + "instance", c, + "num labels", len(namesResp.Names), + "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), + "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) + + // Store the result. + mtx.Lock() + nameSets = append(nameSets, namesResp.Names) + for _, w := range namesResp.Warnings { + warnings = append(warnings, errors.New(w)) + } + queriedBlocks = append(queriedBlocks, myQueriedBlocks...) + mtx.Unlock() + + return nil + }) + } + + // Wait until all client requests complete. + if err := g.Wait(); err != nil { + return nil, nil, nil, err + } + + return nameSets, warnings, queriedBlocks, nil +} + +func (q *blocksStoreQuerier) fetchLabelValuesFromStore( + ctx context.Context, + name string, + clients map[BlocksStoreClient][]ulid.ULID, + minT int64, + maxT int64, +) ([][]string, storage.Warnings, []ulid.ULID, error) { + var ( + reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) + g, gCtx = errgroup.WithContext(reqCtx) + mtx = sync.Mutex{} + valueSets = [][]string{} + warnings = storage.Warnings(nil) + queriedBlocks = []ulid.ULID(nil) + spanLog = spanlogger.FromContext(ctx) + ) + + // Concurrently fetch series from all clients. + for c, blockIDs := range clients { + // Change variables scope since it will be used in a goroutine. + c := c + blockIDs := blockIDs + + g.Go(func() error { + req, err := createLabelValuesRequest(minT, maxT, name, blockIDs) + if err != nil { + return errors.Wrapf(err, "failed to create label values request") + } + + valuesResp, err := c.LabelValues(gCtx, req) + if err != nil { + return errors.Wrapf(err, "failed to fetch series from %s", c.RemoteAddress()) + } + + myQueriedBlocks := []ulid.ULID(nil) + if valuesResp.Hints != nil { + hints := hintspb.LabelValuesResponseHints{} + if err := types.UnmarshalAny(valuesResp.Hints, &hints); err != nil { + return errors.Wrapf(err, "failed to unmarshal label values hints from %s", c.RemoteAddress()) + } + + ids, err := convertBlockHintsToULIDs(hints.QueriedBlocks) + if err != nil { + return errors.Wrapf(err, "failed to parse queried block IDs from received hints") + } + + myQueriedBlocks = ids + } + + level.Debug(spanLog).Log("msg", "received label values from store-gateway", + "instance", c.RemoteAddress(), + "num values", len(valuesResp.Values), + "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), + "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) + + // Values returned need not be sorted, but we need them to be sorted so we can merge. + sort.Strings(valuesResp.Values) + + // Store the result. 
+ mtx.Lock() + valueSets = append(valueSets, valuesResp.Values) + for _, w := range valuesResp.Warnings { + warnings = append(warnings, errors.New(w)) + } + queriedBlocks = append(queriedBlocks, myQueriedBlocks...) + mtx.Unlock() + + return nil + }) + } + + // Wait until all client requests complete. + if err := g.Wait(); err != nil { + return nil, nil, nil, err + } + + return valueSets, warnings, queriedBlocks, nil +} + +func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skipChunks bool, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { // Selectively query only specific blocks. hints := &hintspb.SeriesRequestHints{ BlockMatchers: []storepb.LabelMatcher{ @@ -560,7 +813,7 @@ func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, bloc anyHints, err := types.MarshalAny(hints) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal request hints") + return nil, errors.Wrapf(err, "failed to marshal series request hints") } return &storepb.SeriesRequest{ @@ -569,9 +822,65 @@ func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, bloc Matchers: matchers, PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, Hints: anyHints, + SkipChunks: skipChunks, }, nil } +func createLabelNamesRequest(minT, maxT int64, blockIDs []ulid.ULID) (*storepb.LabelNamesRequest, error) { + req := &storepb.LabelNamesRequest{ + Start: minT, + End: maxT, + } + + // Selectively query only specific blocks. + hints := &hintspb.LabelNamesRequestHints{ + BlockMatchers: []storepb.LabelMatcher{ + { + Type: storepb.LabelMatcher_RE, + Name: block.BlockIDLabel, + Value: strings.Join(convertULIDsToString(blockIDs), "|"), + }, + }, + } + + anyHints, err := types.MarshalAny(hints) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal label names request hints") + } + + req.Hints = anyHints + + return req, nil +} + +func createLabelValuesRequest(minT, maxT int64, label string, blockIDs []ulid.ULID) (*storepb.LabelValuesRequest, error) { + req := &storepb.LabelValuesRequest{ + Start: minT, + End: maxT, + Label: label, + } + + // Selectively query only specific blocks. 
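// Editor's aside (not part of the patch): the BlockMatchers hint being built here
// restricts a request to specific blocks by joining their ULIDs into one
// regular-expression matcher ("id1|id2|..."). A tiny stdlib-only sketch of that
// encoding; the IDs are made up:
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	blockIDs := []string{
		"01EY0ZA8PJFZ0P5YBG2DQMTDZ0",
		"01EY0ZB9QKGA1Q6ZCH3ERNXE11",
	}

	// Same shape as the hint value: a regex alternation over the wanted block IDs.
	value := strings.Join(blockIDs, "|")
	re := regexp.MustCompile("^(?:" + value + ")$")

	fmt.Println(re.MatchString(blockIDs[0]))                  // true: this block is selected
	fmt.Println(re.MatchString("01EY0ZCAXXXXXXXXXXXXXXXXXX")) // false: this block is skipped
}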
+ hints := &hintspb.LabelValuesRequestHints{ + BlockMatchers: []storepb.LabelMatcher{ + { + Type: storepb.LabelMatcher_RE, + Name: block.BlockIDLabel, + Value: strings.Join(convertULIDsToString(blockIDs), "|"), + }, + }, + } + + anyHints, err := types.MarshalAny(hints) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal label values request hints") + } + + req.Hints = anyHints + + return req, nil +} + func convertULIDsToString(ids []ulid.ULID) []string { res := make([]string, len(ids)) for idx, id := range ids { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go index da1170005d7e0..03d09986e7c1b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go @@ -7,12 +7,12 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier/chunkstore" seriesset "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/tenant" ) type chunkIteratorFunc func(chunks []chunk.Chunk, from, through model.Time) chunkenc.Iterator @@ -39,10 +39,18 @@ type chunkStoreQuerier struct { // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. func (q *chunkStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - userID, err := user.ExtractOrgID(q.ctx) + userID, err := tenant.TenantID(q.ctx) if err != nil { return storage.ErrSeriesSet(err) } + + // We will hit this for /series lookup when -querier.query-store-for-labels-enabled is set. + // If we don't skip here, it'll make /series lookups extremely slow as all the chunks will be loaded. + // That flag is only to be set with blocks storage engine, and this is a protective measure. + if sp == nil || sp.Func == "series" { + return storage.EmptySeriesSet() + } + chunks, err := q.store.Get(q.ctx, userID, model.Time(sp.Start), model.Time(sp.End), matchers...) if err != nil { return storage.ErrSeriesSet(err) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go index 924447069da7e..d85987630bafc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go @@ -8,9 +8,9 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/querier/chunkstore" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" ) @@ -20,7 +20,7 @@ import ( // on ingester chunk query streaming. 
func ChunksHandler(queryable storage.Queryable) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userID, err := user.ExtractOrgID(r.Context()) + userID, err := tenant.TenantID(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go index 2f3743543a159..2c4800e68cf8d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go @@ -10,11 +10,11 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/prom1/storage/metric" "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -25,8 +25,8 @@ import ( type Distributor interface { Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*client.QueryStreamResponse, error) - LabelValuesForLabelName(context.Context, model.LabelName) ([]string, error) - LabelNames(context.Context) ([]string, error) + LabelValuesForLabelName(ctx context.Context, from, to model.Time, label model.LabelName) ([]string, error) + LabelNames(context.Context, model.Time, model.Time) ([]string, error) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) } @@ -129,7 +129,7 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers .. 
} func (q *distributorQuerier) streamingSelect(minT, maxT int64, matchers []*labels.Matcher) storage.SeriesSet { - userID, err := user.ExtractOrgID(q.ctx) + userID, err := tenant.TenantID(q.ctx) if err != nil { return storage.ErrSeriesSet(err) } @@ -183,12 +183,13 @@ func (q *distributorQuerier) streamingSelect(minT, maxT int64, matchers []*label } func (q *distributorQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { - lv, err := q.distributor.LabelValuesForLabelName(q.ctx, model.LabelName(name)) - return lv, nil, err + lvs, err := q.distributor.LabelValuesForLabelName(q.ctx, model.Time(q.mint), model.Time(q.maxt), model.LabelName(name)) + + return lvs, nil, err } func (q *distributorQuerier) LabelNames() ([]string, storage.Warnings, error) { - ln, err := q.distributor.LabelNames(q.ctx) + ln, err := q.distributor.LabelNames(q.ctx, model.Time(q.mint), model.Time(q.maxt)) return ln, nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go deleted file mode 100644 index 54356592e209b..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go +++ /dev/null @@ -1,531 +0,0 @@ -package frontend - -import ( - "bytes" - "context" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "path" - "strings" - "sync" - "time" - - "github.com/NYTimes/gziphandler" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - opentracing "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/httpgrpc/server" - "github.com/weaveworks/common/user" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" -) - -const ( - // StatusClientClosedRequest is the status code for when a client request cancellation of an http request - StatusClientClosedRequest = 499 -) - -var ( - errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests") - errCanceled = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error()) - errDeadlineExceeded = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error()) -) - -// Config for a Frontend. -type Config struct { - MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` - CompressResponses bool `yaml:"compress_responses"` - DownstreamURL string `yaml:"downstream_url"` - LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.IntVar(&cfg.MaxOutstandingPerTenant, "querier.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per frontend; requests beyond this error with HTTP 429.") - f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.") - f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") - f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.") -} - -type Limits interface { - // Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. 
- MaxQueriersPerUser(user string) int -} - -// Frontend queues HTTP requests, dispatches them to backends, and handles retries -// for requests which failed. -type Frontend struct { - cfg Config - log log.Logger - roundTripper http.RoundTripper - limits Limits - - mtx sync.Mutex - cond *sync.Cond // Notified when request is enqueued or dequeued, or querier is disconnected. - queues *queues - - connectedClients *atomic.Int32 - - // Metrics. - numClients prometheus.GaugeFunc - queueDuration prometheus.Histogram - queueLength *prometheus.GaugeVec -} - -type request struct { - enqueueTime time.Time - queueSpan opentracing.Span - originalCtx context.Context - - request *httpgrpc.HTTPRequest - err chan error - response chan *httpgrpc.HTTPResponse -} - -// New creates a new frontend. -func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Frontend, error) { - connectedClients := atomic.NewInt32(0) - f := &Frontend{ - cfg: cfg, - log: log, - limits: limits, - queues: newUserQueues(cfg.MaxOutstandingPerTenant), - queueDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "query_frontend_queue_duration_seconds", - Help: "Time spend by requests queued.", - Buckets: prometheus.DefBuckets, - }), - queueLength: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "query_frontend_queue_length", - Help: "Number of queries in the queue.", - }, []string{"user"}), - numClients: promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "query_frontend_connected_clients", - Help: "Number of worker clients currently connected to the frontend.", - }, func() float64 { return float64(connectedClients.Load()) }), - connectedClients: connectedClients, - } - f.cond = sync.NewCond(&f.mtx) - - // The front end implements http.RoundTripper using a GRPC worker queue by default. - f.roundTripper = f - // However if the user has specified a downstream Prometheus, then we should use that. - if cfg.DownstreamURL != "" { - u, err := url.Parse(cfg.DownstreamURL) - if err != nil { - return nil, err - } - - f.roundTripper = RoundTripFunc(func(r *http.Request) (*http.Response, error) { - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(r.Context()) - if tracer != nil && span != nil { - carrier := opentracing.HTTPHeadersCarrier(r.Header) - tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) - } - r.URL.Scheme = u.Scheme - r.URL.Host = u.Host - r.URL.Path = path.Join(u.Path, r.URL.Path) - r.Host = "" - return http.DefaultTransport.RoundTrip(r) - }) - } - - return f, nil -} - -// Wrap uses a Tripperware to chain a new RoundTripper to the frontend. -func (f *Frontend) Wrap(trw Tripperware) { - f.roundTripper = trw(f.roundTripper) -} - -// Tripperware is a signature for all http client-side middleware. -type Tripperware func(http.RoundTripper) http.RoundTripper - -// RoundTripFunc is to http.RoundTripper what http.HandlerFunc is to http.Handler. -type RoundTripFunc func(*http.Request) (*http.Response, error) - -// RoundTrip implements http.RoundTripper. -func (f RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return f(r) -} - -// Close stops new requests and errors out any pending requests. -func (f *Frontend) Close() { - f.mtx.Lock() - defer f.mtx.Unlock() - for f.queues.len() > 0 { - f.cond.Wait() - } -} - -// Handler for HTTP requests. 
-func (f *Frontend) Handler() http.Handler { - if f.cfg.CompressResponses { - return gziphandler.GzipHandler(http.HandlerFunc(f.handle)) - } - return http.HandlerFunc(f.handle) -} - -func (f *Frontend) handle(w http.ResponseWriter, r *http.Request) { - - startTime := time.Now() - resp, err := f.roundTripper.RoundTrip(r) - queryResponseTime := time.Since(startTime) - - if err != nil { - writeError(w, err) - } else { - hs := w.Header() - for h, vs := range resp.Header { - hs[h] = vs - } - w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) - } - - // If LogQueriesLongerThan is set to <0 we log every query, if it is set to 0 query logging - // is disabled - if f.cfg.LogQueriesLongerThan != 0 && queryResponseTime > f.cfg.LogQueriesLongerThan { - logMessage := []interface{}{ - "msg", "slow query detected", - "method", r.Method, - "host", r.Host, - "path", r.URL.Path, - "time_taken", queryResponseTime.String(), - } - - // Ensure the form has been parsed so all the parameters are present - err = r.ParseForm() - if err != nil { - level.Warn(util.WithContext(r.Context(), f.log)).Log("msg", "unable to parse form for request", "err", err) - } - - // Attempt to iterate through the Form to log any filled in values - for k, v := range r.Form { - logMessage = append(logMessage, fmt.Sprintf("param_%s", k), strings.Join(v, ",")) - } - - level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) - } -} - -func writeError(w http.ResponseWriter, err error) { - switch err { - case context.Canceled: - err = errCanceled - case context.DeadlineExceeded: - err = errDeadlineExceeded - default: - } - server.WriteError(w, err) -} - -// RoundTrip implement http.Transport. -func (f *Frontend) RoundTrip(r *http.Request) (*http.Response, error) { - req, err := server.HTTPRequest(r) - if err != nil { - return nil, err - } - - resp, err := f.RoundTripGRPC(r.Context(), req) - if err != nil { - return nil, err - } - - httpResp := &http.Response{ - StatusCode: int(resp.Code), - Body: ioutil.NopCloser(bytes.NewReader(resp.Body)), - Header: http.Header{}, - } - for _, h := range resp.Headers { - httpResp.Header[h.Key] = h.Values - } - return httpResp, nil -} - -type httpgrpcHeadersCarrier httpgrpc.HTTPRequest - -func (c *httpgrpcHeadersCarrier) Set(key, val string) { - c.Headers = append(c.Headers, &httpgrpc.Header{ - Key: key, - Values: []string{val}, - }) -} - -// RoundTripGRPC round trips a proto (instead of a HTTP request). -func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - // Propagate trace context in gRPC too - this will be ignored if using HTTP. - tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) - if tracer != nil && span != nil { - carrier := (*httpgrpcHeadersCarrier)(req) - tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) - } - - request := request{ - request: req, - originalCtx: ctx, - - // Buffer of 1 to ensure response can be written by the server side - // of the Process stream, even if this goroutine goes away due to - // client context cancellation. - err: make(chan error, 1), - response: make(chan *httpgrpc.HTTPResponse, 1), - } - - if err := f.queueRequest(ctx, &request); err != nil { - return nil, err - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - - case resp := <-request.response: - return resp, nil - - case err := <-request.err: - return nil, err - } -} - -// Process allows backends to pull requests from the frontend. 
-func (f *Frontend) Process(server Frontend_ProcessServer) error { - querierID, err := getQuerierID(server) - if err != nil { - return err - } - - f.registerQuerierConnection(querierID) - defer f.unregisterQuerierConnection(querierID) - - // If the downstream request(from querier -> frontend) is cancelled, - // we need to ping the condition variable to unblock getNextRequestForQuerier. - // Ideally we'd have ctx aware condition variables... - go func() { - <-server.Context().Done() - f.cond.Broadcast() - }() - - lastUserIndex := -1 - - for { - req, idx, err := f.getNextRequestForQuerier(server.Context(), lastUserIndex, querierID) - if err != nil { - return err - } - lastUserIndex = idx - - // Handle the stream sending & receiving on a goroutine so we can - // monitoring the contexts in a select and cancel things appropriately. - resps := make(chan *httpgrpc.HTTPResponse, 1) - errs := make(chan error, 1) - go func() { - err = server.Send(&FrontendToClient{ - Type: HTTP_REQUEST, - HttpRequest: req.request, - }) - if err != nil { - errs <- err - return - } - - resp, err := server.Recv() - if err != nil { - errs <- err - return - } - - resps <- resp.HttpResponse - }() - - select { - // If the upstream request is cancelled, we need to cancel the - // downstream req. Only way we can do that is to close the stream. - // The worker client is expecting this semantics. - case <-req.originalCtx.Done(): - return req.originalCtx.Err() - - // Is there was an error handling this request due to network IO, - // then error out this upstream request _and_ stream. - case err := <-errs: - req.err <- err - return err - - // Happy path: propagate the response. - case resp := <-resps: - req.response <- resp - } - } -} - -func getQuerierID(server Frontend_ProcessServer) (string, error) { - err := server.Send(&FrontendToClient{ - Type: GET_ID, - // Old queriers don't support GET_ID, and will try to use the request. - // To avoid confusing them, include dummy request. - HttpRequest: &httpgrpc.HTTPRequest{ - Method: "GET", - Url: "/invalid_request_sent_by_frontend", - }, - }) - - if err != nil { - return "", err - } - - resp, err := server.Recv() - - // Old queriers will return empty string, which is fine. All old queriers will be - // treated as single querier with lot of connections. - // (Note: if resp is nil, GetClientID() returns "") - return resp.GetClientID(), err -} - -func (f *Frontend) queueRequest(ctx context.Context, req *request) error { - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return err - } - - req.enqueueTime = time.Now() - req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued") - - maxQueriers := f.limits.MaxQueriersPerUser(userID) - - f.mtx.Lock() - defer f.mtx.Unlock() - - queue := f.queues.getOrAddQueue(userID, maxQueriers) - if queue == nil { - // This can only happen if userID is "". - return errors.New("no queue found") - } - - select { - case queue <- req: - f.queueLength.WithLabelValues(userID).Inc() - f.cond.Broadcast() - return nil - default: - return errTooManyRequest - } -} - -// getQueue picks a random queue and takes the next unexpired request off of it, so we -// fairly process users queries. Will block if there are no requests. -func (f *Frontend) getNextRequestForQuerier(ctx context.Context, lastUserIndex int, querierID string) (*request, int, error) { - f.mtx.Lock() - defer f.mtx.Unlock() - - querierWait := false - -FindQueue: - // We need to wait if there are no users, or no pending requests for given querier. 
- for (f.queues.len() == 0 || querierWait) && ctx.Err() == nil { - querierWait = false - f.cond.Wait() - } - - if err := ctx.Err(); err != nil { - return nil, lastUserIndex, err - } - - for { - queue, userID, idx := f.queues.getNextQueueForQuerier(lastUserIndex, querierID) - lastUserIndex = idx - if queue == nil { - break - } - /* - We want to dequeue the next unexpired request from the chosen tenant queue. - The chance of choosing a particular tenant for dequeueing is (1/active_tenants). - This is problematic under load, especially with other middleware enabled such as - querier.split-by-interval, where one request may fan out into many. - If expired requests aren't exhausted before checking another tenant, it would take - n_active_tenants * n_expired_requests_at_front_of_queue requests being processed - before an active request was handled for the tenant in question. - If this tenant meanwhile continued to queue requests, - it's possible that it's own queue would perpetually contain only expired requests. - */ - - // Pick the first non-expired request from this user's queue (if any). - for { - lastRequest := false - request := <-queue - if len(queue) == 0 { - f.queues.deleteQueue(userID) - lastRequest = true - } - - // Tell close() we've processed a request. - f.cond.Broadcast() - - f.queueDuration.Observe(time.Since(request.enqueueTime).Seconds()) - f.queueLength.WithLabelValues(userID).Dec() - request.queueSpan.Finish() - - // Ensure the request has not already expired. - if request.originalCtx.Err() == nil { - return request, lastUserIndex, nil - } - - // Stop iterating on this queue if we've just consumed the last request. - if lastRequest { - break - } - } - } - - // There are no unexpired requests, so we can get back - // and wait for more requests. - querierWait = true - goto FindQueue -} - -// CheckReady determines if the query frontend is ready. 
Function parameters/return -// chosen to match the same method in the ingester -func (f *Frontend) CheckReady(_ context.Context) error { - // if the downstream url is configured the query frontend is not aware of the state - // of the queriers and is therefore always ready - if f.cfg.DownstreamURL != "" { - return nil - } - - // if we have more than one querier connected we will consider ourselves ready - connectedClients := f.connectedClients.Load() - if connectedClients > 0 { - return nil - } - - msg := fmt.Sprintf("not ready: number of queriers connected to query-frontend is %d", connectedClients) - level.Info(f.log).Log("msg", msg) - return errors.New(msg) -} - -func (f *Frontend) registerQuerierConnection(querier string) { - f.connectedClients.Inc() - - f.mtx.Lock() - defer f.mtx.Unlock() - f.queues.addQuerierConnection(querier) -} - -func (f *Frontend) unregisterQuerierConnection(querier string) { - f.connectedClients.Dec() - - f.mtx.Lock() - defer f.mtx.Unlock() - f.queues.removeQuerierConnection(querier) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go deleted file mode 100644 index 48f133659cddf..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go +++ /dev/null @@ -1,219 +0,0 @@ -package frontend - -import ( - "context" - "flag" - "fmt" - "math/rand" - "os" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/errors" - "github.com/weaveworks/common/httpgrpc/server" - "github.com/weaveworks/common/middleware" - "google.golang.org/grpc" - "google.golang.org/grpc/naming" - - "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/services" -) - -// WorkerConfig is config for a worker. -type WorkerConfig struct { - Address string `yaml:"frontend_address"` - Parallelism int `yaml:"parallelism"` - MatchMaxConcurrency bool `yaml:"match_max_concurrent"` - DNSLookupDuration time.Duration `yaml:"dns_lookup_duration"` - QuerierID string `yaml:"id"` - - GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` -} - -// RegisterFlags adds the flags required to config this to the given FlagSet. -func (cfg *WorkerConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Address, "querier.frontend-address", "", "Address of query frontend service, in host:port format.") - f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process per query frontend.") - f.BoolVar(&cfg.MatchMaxConcurrency, "querier.worker-match-max-concurrent", false, "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.") - f.DurationVar(&cfg.DNSLookupDuration, "querier.dns-lookup-period", 10*time.Second, "How often to query DNS.") - f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.") - - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) -} - -func (cfg *WorkerConfig) Validate(log log.Logger) error { - return cfg.GRPCClientConfig.Validate(log) -} - -// Worker is the counter-part to the frontend, actually processing requests. -type worker struct { - cfg WorkerConfig - querierCfg querier.Config - log log.Logger - server *server.Server - - watcher naming.Watcher //nolint:staticcheck //Skipping for now. 
If you still see this more than likely issue https://github.com/cortexproject/cortex/issues/2015 has not yet been addressed. - managers map[string]*frontendManager -} - -// NewWorker creates a new worker and returns a service that is wrapping it. -// If no address is specified, it returns nil service (and no error). -func NewWorker(cfg WorkerConfig, querierCfg querier.Config, server *server.Server, log log.Logger) (services.Service, error) { - if cfg.Address == "" { - level.Info(log).Log("msg", "no address specified, not starting worker") - return nil, nil - } - - if cfg.QuerierID == "" { - hostname, err := os.Hostname() - if err != nil { - return nil, errors.Wrap(err, "unable to get hostname used to initialise default querier ID") - } - cfg.QuerierID = hostname - } - - resolver, err := naming.NewDNSResolverWithFreq(cfg.DNSLookupDuration) - if err != nil { - return nil, err - } - - watcher, err := resolver.Resolve(cfg.Address) - if err != nil { - return nil, err - } - - w := &worker{ - cfg: cfg, - querierCfg: querierCfg, - log: log, - server: server, - watcher: watcher, - managers: map[string]*frontendManager{}, - } - return services.NewBasicService(nil, w.watchDNSLoop, w.stopping), nil -} - -func (w *worker) stopping(_ error) error { - // wait until all per-address workers are done. This is only called after watchDNSLoop exits. - for _, mgr := range w.managers { - mgr.stop() - } - return nil -} - -// watchDNSLoop watches for changes in DNS and starts or stops workers. -func (w *worker) watchDNSLoop(servCtx context.Context) error { - go func() { - // Close the watcher, when this service is asked to stop. - // Closing the watcher makes watchDNSLoop exit, since it only iterates on watcher updates, and has no other - // way to stop. We cannot close the watcher in `stopping` method, because it is only called *after* - // watchDNSLoop exits. - <-servCtx.Done() - w.watcher.Close() - }() - - for { - updates, err := w.watcher.Next() - if err != nil { - // watcher.Next returns error when Close is called, but we call Close when our context is done. - // we don't want to report error in that case. - if servCtx.Err() != nil { - return nil - } - return errors.Wrapf(err, "error from DNS watcher") - } - - for _, update := range updates { - switch update.Op { - case naming.Add: - level.Debug(w.log).Log("msg", "adding connection", "addr", update.Addr) - conn, err := w.connect(servCtx, update.Addr) - if err != nil { - level.Error(w.log).Log("msg", "error connecting", "addr", update.Addr, "err", err) - continue - } - - w.managers[update.Addr] = newFrontendManager(servCtx, w.log, w.server, conn, NewFrontendClient(conn), w.cfg.GRPCClientConfig, w.cfg.QuerierID) - - case naming.Delete: - level.Debug(w.log).Log("msg", "removing connection", "addr", update.Addr) - if mgr, ok := w.managers[update.Addr]; ok { - mgr.stop() - delete(w.managers, update.Addr) - } - - default: - return fmt.Errorf("unknown op: %v", update.Op) - } - } - - w.resetConcurrency() - } -} - -func (w *worker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) { - opts, err := w.cfg.GRPCClientConfig.DialOption([]grpc.UnaryClientInterceptor{middleware.ClientUserHeaderInterceptor}, nil) - if err != nil { - return nil, err - } - - conn, err := grpc.DialContext(ctx, address, opts...) 
- if err != nil { - return nil, err - } - return conn, nil -} - -func (w *worker) resetConcurrency() { - addresses := make([]string, 0, len(w.managers)) - for addr := range w.managers { - addresses = append(addresses, addr) - } - rand.Shuffle(len(addresses), func(i, j int) { addresses[i], addresses[j] = addresses[j], addresses[i] }) - - totalConcurrency := 0 - for i, addr := range addresses { - concurrentRequests := w.concurrency(i, addr) - totalConcurrency += concurrentRequests - - if mgr, ok := w.managers[addr]; ok { - mgr.concurrentRequests(concurrentRequests) - } else { - level.Error(w.log).Log("msg", "address not found in managers map. this should not happen", "addr", addr) - } - } - - if totalConcurrency > w.querierCfg.MaxConcurrent { - level.Warn(w.log).Log("msg", "total worker concurrency is greater than promql max concurrency. queries may be queued in the querier which reduces QOS") - } -} - -func (w *worker) concurrency(index int, addr string) int { - concurrentRequests := 0 - - if w.cfg.MatchMaxConcurrency { - concurrentRequests = w.querierCfg.MaxConcurrent / len(w.managers) - - // If max concurrency does not evenly divide into our frontends a subset will be chosen - // to receive an extra connection. Frontend addresses were shuffled above so this will be a - // random selection of frontends. - if index < w.querierCfg.MaxConcurrent%len(w.managers) { - level.Warn(w.log).Log("msg", "max concurrency is not evenly divisible across query frontends. adding an extra connection", "addr", addr) - concurrentRequests++ - } - } else { - concurrentRequests = w.cfg.Parallelism - } - - // If concurrentRequests is 0 then w.querierCfg.MaxConcurrent is less than the total number of - // query frontends. In order to prevent accidentally starving a frontend we are just going to - // always connect once to every frontend. This is dangerous b/c we may start exceeding promql - // max concurrency. 
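// Editor's aside (not part of the patch): a stdlib-only sketch of the concurrency split
// the deleted worker computes here: divide the querier's max concurrency evenly across
// the known frontends, hand the remainder to the first few (shuffled) addresses, and
// never drop below one connection per frontend. The numbers are illustrative.
package main

import "fmt"

func perFrontendConcurrency(maxConcurrent, numFrontends int) []int {
	out := make([]int, numFrontends)
	for i := range out {
		n := maxConcurrent / numFrontends
		if i < maxConcurrent%numFrontends {
			n++ // a subset of frontends absorbs the remainder
		}
		if n == 0 {
			n = 1 // always keep at least one connection so no frontend is starved
		}
		out[i] = n
	}
	return out
}

func main() {
	fmt.Println(perFrontendConcurrency(20, 3)) // [7 7 6]
	fmt.Println(perFrontendConcurrency(2, 4))  // [1 1 1 1] - total exceeds max concurrency, the risk the comment above warns about
}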
- if concurrentRequests == 0 { - concurrentRequests = 1 - } - - return concurrentRequests -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker_frontend_manager.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker_frontend_manager.go deleted file mode 100644 index 194abe1080383..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker_frontend_manager.go +++ /dev/null @@ -1,172 +0,0 @@ -package frontend - -import ( - "context" - "fmt" - "io" - "net/http" - "sync" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/httpgrpc/server" - "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/grpcclient" -) - -var ( - backoffConfig = util.BackoffConfig{ - MinBackoff: 50 * time.Millisecond, - MaxBackoff: 1 * time.Second, - } -) - -type frontendManager struct { - server *server.Server - connection io.Closer - client FrontendClient - clientCfg grpcclient.ConfigWithTLS - querierID string - - log log.Logger - - workerCancels []context.CancelFunc - serverCtx context.Context - wg sync.WaitGroup - currentProcessors *atomic.Int32 -} - -func newFrontendManager(serverCtx context.Context, log log.Logger, server *server.Server, connection io.Closer, client FrontendClient, clientCfg grpcclient.ConfigWithTLS, querierID string) *frontendManager { - f := &frontendManager{ - log: log, - connection: connection, - client: client, - clientCfg: clientCfg, - server: server, - serverCtx: serverCtx, - currentProcessors: atomic.NewInt32(0), - querierID: querierID, - } - - return f -} - -func (f *frontendManager) stop() { - f.concurrentRequests(0) - f.wg.Wait() - _ = f.connection.Close() -} - -func (f *frontendManager) concurrentRequests(n int) { - if n < 0 { - n = 0 - } - - for len(f.workerCancels) < n { - ctx, cancel := context.WithCancel(f.serverCtx) - f.workerCancels = append(f.workerCancels, cancel) - - go f.runOne(ctx) - } - - for len(f.workerCancels) > n { - var cancel context.CancelFunc - cancel, f.workerCancels = f.workerCancels[0], f.workerCancels[1:] - cancel() - } -} - -// runOne loops, trying to establish a stream to the frontend to begin -// request processing. -func (f *frontendManager) runOne(ctx context.Context) { - f.wg.Add(1) - defer f.wg.Done() - - f.currentProcessors.Inc() - defer f.currentProcessors.Dec() - - backoff := util.NewBackoff(ctx, backoffConfig) - for backoff.Ongoing() { - c, err := f.client.Process(ctx) - if err != nil { - level.Error(f.log).Log("msg", "error contacting frontend", "err", err) - backoff.Wait() - continue - } - - if err := f.process(c); err != nil { - level.Error(f.log).Log("msg", "error processing requests", "err", err) - backoff.Wait() - continue - } - - backoff.Reset() - } -} - -// process loops processing requests on an established stream. -func (f *frontendManager) process(c Frontend_ProcessClient) error { - // Build a child context so we can cancel a query when the stream is closed. - ctx, cancel := context.WithCancel(c.Context()) - defer cancel() - - for { - request, err := c.Recv() - if err != nil { - return err - } - - switch request.Type { - case HTTP_REQUEST: - // Handle the request on a "background" goroutine, so we go back to - // blocking on c.Recv(). This allows us to detect the stream closing - // and cancel the query. 
We don't actually handle queries in parallel - // here, as we're running in lock step with the server - each Recv is - // paired with a Send. - go f.runRequest(ctx, request.HttpRequest, func(response *httpgrpc.HTTPResponse) error { - return c.Send(&ClientToFrontend{HttpResponse: response}) - }) - - case GET_ID: - err := c.Send(&ClientToFrontend{ClientID: f.querierID}) - if err != nil { - return err - } - - default: - return fmt.Errorf("unknown request type: %v", request.Type) - } - } -} - -func (f *frontendManager) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, sendHTTPResponse func(response *httpgrpc.HTTPResponse) error) { - response, err := f.server.Handle(ctx, request) - if err != nil { - var ok bool - response, ok = httpgrpc.HTTPResponseFromError(err) - if !ok { - response = &httpgrpc.HTTPResponse{ - Code: http.StatusInternalServerError, - Body: []byte(err.Error()), - } - } - } - - // Ensure responses that are too big are not retried. - if len(response.Body) >= f.clientCfg.GRPC.MaxSendMsgSize { - errMsg := fmt.Sprintf("response larger than the max (%d vs %d)", len(response.Body), f.clientCfg.GRPC.MaxSendMsgSize) - response = &httpgrpc.HTTPResponse{ - Code: http.StatusRequestEntityTooLarge, - Body: []byte(errMsg), - } - level.Error(f.log).Log("msg", "error processing query", "err", errMsg) - } - - if err := sendHTTPResponse(response); err != nil { - level.Error(f.log).Log("msg", "error processing requests", "err", err) - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index 91a108c89e837..f24051489c60d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "strings" + "sync" "time" "github.com/go-kit/kit/log/level" @@ -14,7 +15,8 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" - "github.com/weaveworks/common/user" + "github.com/thanos-io/thanos/pkg/strutil" + "golang.org/x/sync/errgroup" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/purger" @@ -23,6 +25,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/iterators" "github.com/cortexproject/cortex/pkg/querier/lazyquery" "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -39,6 +42,7 @@ type Config struct { IngesterStreaming bool `yaml:"ingester_streaming"` MaxSamples int `yaml:"max_samples"` QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` + QueryStoreForLabels bool `yaml:"query_store_for_labels_enabled"` // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. QueryStoreAfter time.Duration `yaml:"query_store_after"` @@ -71,6 +75,7 @@ type Config struct { var ( errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent") errShuffleShardingLookbackLessThanQueryStoreAfter = errors.New("the shuffle-sharding lookback period should be greater or equal than the configured 'query store after'") + errEmptyTimeRange = errors.New("empty time range") ) // RegisterFlags adds the flags required to config this to the given FlagSet. 
@@ -83,6 +88,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IngesterStreaming, "querier.ingester-streaming", true, "Use streaming RPCs to query ingester.") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") + f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") @@ -204,24 +210,27 @@ func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter, return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { now := time.Now() - if cfg.MaxQueryIntoFuture > 0 { - maxQueryTime := util.TimeToMillis(now.Add(cfg.MaxQueryIntoFuture)) + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } - if mint > maxQueryTime { - return storage.NoopQuerier(), nil - } - if maxt > maxQueryTime { - maxt = maxQueryTime - } + mint, maxt, err = validateQueryTimeRange(ctx, userID, mint, maxt, limits, cfg.MaxQueryIntoFuture) + if err == errEmptyTimeRange { + return storage.NoopQuerier(), nil + } else if err != nil { + return nil, err } q := querier{ - ctx: ctx, - mint: mint, - maxt: maxt, - chunkIterFn: chunkIterFn, - tombstonesLoader: tombstonesLoader, - limits: limits, + ctx: ctx, + mint: mint, + maxt: maxt, + chunkIterFn: chunkIterFn, + tombstonesLoader: tombstonesLoader, + limits: limits, + maxQueryIntoFuture: cfg.MaxQueryIntoFuture, + queryStoreForLabels: cfg.QueryStoreForLabels, } dqr, err := distributor.Querier(ctx, mint, maxt) @@ -263,8 +272,10 @@ type querier struct { ctx context.Context mint, maxt int64 - tombstonesLoader *purger.TombstonesLoader - limits *validation.Overrides + tombstonesLoader *purger.TombstonesLoader + limits *validation.Overrides + maxQueryIntoFuture time.Duration + queryStoreForLabels bool } // Select implements storage.Querier interface. @@ -283,22 +294,41 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat // querying the long-term storage. // Also, in the recent versions of Prometheus, we pass in the hint but with Func set to "series". // See: https://github.com/prometheus/prometheus/pull/8050 - if sp == nil || sp.Func == "series" { + if (sp == nil || sp.Func == "series") && !q.queryStoreForLabels { + // In this case, the query time range has already been validated when the querier has been + // created. return q.metadataQuerier.Select(true, sp, matchers...) 
} - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return storage.ErrSeriesSet(err) } - // Validate query time range. - startTime := model.Time(sp.Start) - endTime := model.Time(sp.End) + // Validate query time range. Even if the time range has already been validated when we created + // the querier, we need to check it again here because the time range specified in hints may be + // different. + startMs, endMs, err := validateQueryTimeRange(ctx, userID, sp.Start, sp.End, q.limits, q.maxQueryIntoFuture) + if err == errEmptyTimeRange { + return storage.NoopSeriesSet() + } else if err != nil { + return storage.ErrSeriesSet(err) + } + + // The time range may have been manipulated during the validation, + // so we make sure changes are reflected back to hints. + sp.Start = startMs + sp.End = endMs + + startTime := model.Time(startMs) + endTime := model.Time(endMs) + + // Validate query time range. This validation should be done only for instant / range queries and + // NOT for metadata queries (series, labels) because the query-frontend doesn't support splitting + // of such queries. if maxQueryLength := q.limits.MaxQueryLength(userID); maxQueryLength > 0 && endTime.Sub(startTime) > maxQueryLength { limitErr := validation.LimitError(fmt.Sprintf(validation.ErrQueryTooLong, endTime.Sub(startTime), maxQueryLength)) return storage.ErrSeriesSet(limitErr) - } tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, startTime, endTime) @@ -346,11 +376,91 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat // LabelsValue implements storage.Querier. func (q querier) LabelValues(name string) ([]string, storage.Warnings, error) { - return q.metadataQuerier.LabelValues(name) + if !q.queryStoreForLabels { + return q.metadataQuerier.LabelValues(name) + } + + if len(q.queriers) == 1 { + return q.queriers[0].LabelValues(name) + } + + var ( + g, _ = errgroup.WithContext(q.ctx) + sets = [][]string{} + warnings = storage.Warnings(nil) + + resMtx sync.Mutex + ) + + for _, querier := range q.queriers { + // Need to reassign as the original variable will change and can't be relied on in a goroutine. + querier := querier + g.Go(func() error { + // NB: Values are sorted in Cortex already. + myValues, myWarnings, err := querier.LabelValues(name) + if err != nil { + return err + } + + resMtx.Lock() + sets = append(sets, myValues) + warnings = append(warnings, myWarnings...) + resMtx.Unlock() + + return nil + }) + } + + err := g.Wait() + if err != nil { + return nil, nil, err + } + + return strutil.MergeSlices(sets...), warnings, nil } func (q querier) LabelNames() ([]string, storage.Warnings, error) { - return q.metadataQuerier.LabelNames() + if !q.queryStoreForLabels { + return q.metadataQuerier.LabelNames() + } + + if len(q.queriers) == 1 { + return q.queriers[0].LabelNames() + } + + var ( + g, _ = errgroup.WithContext(q.ctx) + sets = [][]string{} + warnings = storage.Warnings(nil) + + resMtx sync.Mutex + ) + + for _, querier := range q.queriers { + // Need to reassign as the original variable will change and can't be relied on in a goroutine. + querier := querier + g.Go(func() error { + // NB: Names are sorted in Cortex already. + myNames, myWarnings, err := querier.LabelNames() + if err != nil { + return err + } + + resMtx.Lock() + sets = append(sets, myNames) + warnings = append(warnings, myWarnings...) 
+ resMtx.Unlock() + + return nil + }) + } + + err := g.Wait() + if err != nil { + return nil, nil, err + } + + return strutil.MergeSlices(sets...), warnings, nil } func (querier) Close() error { @@ -475,3 +585,43 @@ func UseBeforeTimestampQueryable(queryable storage.Queryable, ts time.Time) Quer ts: t, } } + +func validateQueryTimeRange(ctx context.Context, userID string, startMs, endMs int64, limits *validation.Overrides, maxQueryIntoFuture time.Duration) (int64, int64, error) { + now := model.Now() + startTime := model.Time(startMs) + endTime := model.Time(endMs) + + // Clamp time range based on max query into future. + if maxQueryIntoFuture > 0 && endTime.After(now.Add(maxQueryIntoFuture)) { + origEndTime := endTime + endTime = now.Add(maxQueryIntoFuture) + + // Make sure to log it in traces to ease debugging. + level.Debug(spanlogger.FromContext(ctx)).Log( + "msg", "the end time of the query has been manipulated because of the 'max query into future' setting", + "original", util.FormatTimeModel(origEndTime), + "updated", util.FormatTimeModel(endTime)) + + if endTime.Before(startTime) { + return 0, 0, errEmptyTimeRange + } + } + + // Clamp the time range based on the max query lookback. + if maxQueryLookback := limits.MaxQueryLookback(userID); maxQueryLookback > 0 && startTime.Before(now.Add(-maxQueryLookback)) { + origStartTime := startTime + startTime = now.Add(-maxQueryLookback) + + // Make sure to log it in traces to ease debugging. + level.Debug(spanlogger.FromContext(ctx)).Log( + "msg", "the start time of the query has been manipulated because of the 'max query lookback' setting", + "original", util.FormatTimeModel(origStartTime), + "updated", util.FormatTimeModel(startTime)) + + if endTime.Before(startTime) { + return 0, 0, errEmptyTimeRange + } + } + + return int64(startTime), int64(endTime), nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go index 54bb46f6d9c74..d61bf8de44410 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go @@ -5,107 +5,92 @@ import ( "net/http" "time" + "github.com/go-kit/kit/log/level" "github.com/prometheus/prometheus/pkg/timestamp" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" + "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) // Limits allows us to specify per-tenant runtime limits on the behavior of // the query handling code. type Limits interface { + // MaxQueryLookback returns the max lookback period of queries. + MaxQueryLookback(userID string) time.Duration + + // MaxQueryLength returns the limit of the length (in time) of a query. MaxQueryLength(string) time.Duration + + // MaxQueryParallelism returns the limit to the number of split queries the + // frontend will process in parallel. MaxQueryParallelism(string) int + + // MaxCacheFreshness returns the period after which results are cacheable, + // to prevent caching of very recent results. MaxCacheFreshness(string) time.Duration } -type limits struct { +type limitsMiddleware struct { Limits next Handler } -// LimitsMiddleware creates a new Middleware that invalidates large queries based on Limits interface. 
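// Editor's aside (not part of the patch): validateQueryTimeRange above clamps the query
// end to now+max-query-into-future and the start to now-max-query-lookback, treating a
// window that inverts after clamping as empty; the rewritten limits middleware below
// applies the same lookback clamp to range requests. A stdlib-only sketch of that
// arithmetic with illustrative values and hypothetical names:
package main

import (
	"fmt"
	"time"
)

// clampRange returns the clamped window and whether anything is left to query.
func clampRange(start, end, now time.Time, maxIntoFuture, maxLookback time.Duration) (time.Time, time.Time, bool) {
	if maxIntoFuture > 0 && end.After(now.Add(maxIntoFuture)) {
		end = now.Add(maxIntoFuture)
	}
	if maxLookback > 0 && start.Before(now.Add(-maxLookback)) {
		start = now.Add(-maxLookback)
	}
	if end.Before(start) {
		return start, end, false // empty range: nothing to query
	}
	return start, end, true
}

func main() {
	now := time.Now()
	start, end, ok := clampRange(now.Add(-30*24*time.Hour), now.Add(time.Hour), now, 10*time.Minute, 7*24*time.Hour)
	fmt.Println(ok, now.Sub(start), end.Sub(now)) // true, ~168h of lookback, ~10m into the future
}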
-func LimitsMiddleware(l Limits) Middleware { +// NewLimitsMiddleware creates a new Middleware that enforces query limits. +func NewLimitsMiddleware(l Limits) Middleware { return MiddlewareFunc(func(next Handler) Handler { - return limits{ + return limitsMiddleware{ next: next, Limits: l, } }) } -func (l limits) Do(ctx context.Context, r Request) (Response, error) { - userid, err := user.ExtractOrgID(ctx) +func (l limitsMiddleware) Do(ctx context.Context, r Request) (Response, error) { + log, ctx := spanlogger.New(ctx, "limits") + defer log.Finish() + + userID, err := tenant.TenantID(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - maxQueryLen := l.MaxQueryLength(userid) - queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart())) - if maxQueryLen > 0 && queryLen > maxQueryLen { - return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLen) - } - return l.next.Do(ctx, r) -} + // Clamp the time range based on the max query lookback. + if maxQueryLookback := l.MaxQueryLookback(userID); maxQueryLookback > 0 { + minStartTime := util.TimeToMillis(time.Now().Add(-maxQueryLookback)) -// RequestResponse contains a request response and the respective request that was used. -type RequestResponse struct { - Request Request - Response Response -} + if r.GetEnd() < minStartTime { + // The request is fully outside the allowed range, so we can return an + // empty response. + level.Debug(log).Log( + "msg", "skipping the execution of the query because its time range is before the 'max query lookback' setting", + "reqStart", util.FormatTimeMillis(r.GetStart()), + "redEnd", util.FormatTimeMillis(r.GetEnd()), + "maxQueryLookback", maxQueryLookback) -// DoRequests executes a list of requests in parallel. The limits parameters is used to limit parallelism per single request. -func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits Limits) ([]RequestResponse, error) { - userid, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) - } + return NewEmptyPrometheusResponse(), nil + } - // If one of the requests fail, we want to be able to cancel the rest of them. - ctx, cancel := context.WithCancel(ctx) - defer cancel() + if r.GetStart() < minStartTime { + // Replace the start time in the request. + level.Debug(log).Log( + "msg", "the start time of the query has been manipulated because of the 'max query lookback' setting", + "original", util.FormatTimeMillis(r.GetStart()), + "updated", util.FormatTimeMillis(minStartTime)) - // Feed all requests to a bounded intermediate channel to limit parallelism. 
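The DoRequests helper removed from this file reappears essentially unchanged (only the tenant-ID extraction changes) in queryrange/util.go further down in this patch. Below is a self-contained sketch of the same bounded-parallelism pattern, assuming a hypothetical doWork handler in place of downstream.Do:

```go
package main

import (
	"context"
	"fmt"
)

// doWork stands in for downstream.Do: a hypothetical per-request handler.
// A real handler should honour ctx so that cancellation after the first
// error actually short-circuits the remaining work.
func doWork(ctx context.Context, req int) (string, error) {
	return fmt.Sprintf("result-%d", req), nil
}

// doRequests mirrors the shape of DoRequests: requests are fed through an
// unbuffered channel, at most `parallelism` workers pull from it, and the
// first error cancels the shared context.
func doRequests(ctx context.Context, reqs []int, parallelism int) ([]string, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Feed all requests to a bounded intermediate channel to limit parallelism.
	intermediate := make(chan int)
	go func() {
		for _, req := range reqs {
			intermediate <- req
		}
		close(intermediate)
	}()

	if parallelism > len(reqs) {
		parallelism = len(reqs)
	}

	respChan, errChan := make(chan string), make(chan error)
	for i := 0; i < parallelism; i++ {
		go func() {
			for req := range intermediate {
				if resp, err := doWork(ctx, req); err != nil {
					errChan <- err
				} else {
					respChan <- resp
				}
			}
		}()
	}

	// Every request produces exactly one result or one error, so collect
	// len(reqs) messages and remember only the first error.
	resps := make([]string, 0, len(reqs))
	var firstErr error
	for range reqs {
		select {
		case resp := <-respChan:
			resps = append(resps, resp)
		case err := <-errChan:
			if firstErr == nil {
				cancel()
				firstErr = err
			}
		}
	}
	return resps, firstErr
}

func main() {
	resps, err := doRequests(context.Background(), []int{1, 2, 3, 4}, 2)
	fmt.Println(resps, err)
}
```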
- intermediate := make(chan Request) - go func() { - for _, req := range reqs { - intermediate <- req + r = r.WithStartEnd(minStartTime, r.GetEnd()) } - close(intermediate) - }() - - respChan, errChan := make(chan RequestResponse), make(chan error) - parallelism := limits.MaxQueryParallelism(userid) - if parallelism > len(reqs) { - parallelism = len(reqs) - } - for i := 0; i < parallelism; i++ { - go func() { - for req := range intermediate { - resp, err := downstream.Do(ctx, req) - if err != nil { - errChan <- err - } else { - respChan <- RequestResponse{req, resp} - } - } - }() } - resps := make([]RequestResponse, 0, len(reqs)) - var firstErr error - for range reqs { - select { - case resp := <-respChan: - resps = append(resps, resp) - case err := <-errChan: - if firstErr == nil { - cancel() - firstErr = err - } + // Enforce the max query length. + if maxQueryLength := l.MaxQueryLength(userID); maxQueryLength > 0 { + queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart())) + if queryLen > maxQueryLength { + return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLength) } } - return resps, firstErr + return l.next.Do(ctx, r) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go index 1071dd95f868a..6e146a98e5f1a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go @@ -76,7 +76,7 @@ type Request interface { // GetCachingOptions returns the caching options. GetCachingOptions() CachingOptions // WithStartEnd clone the current request with different start and end timestamp. - WithStartEnd(int64, int64) Request + WithStartEnd(startTime int64, endTime int64) Request // WithQuery clone the current request with a different query. WithQuery(string) Request proto.Message @@ -135,15 +135,20 @@ func (resp *PrometheusResponse) minTime() int64 { return result[0].Samples[0].TimestampMs } +// NewEmptyPrometheusResponse returns an empty successful Prometheus query range response. 
+func NewEmptyPrometheusResponse() *PrometheusResponse { + return &PrometheusResponse{ + Status: StatusSuccess, + Data: PrometheusData{ + ResultType: model.ValMatrix.String(), + Result: []SampleStream{}, + }, + } +} + func (prometheusCodec) MergeResponse(responses ...Response) (Response, error) { if len(responses) == 0 { - return &PrometheusResponse{ - Status: StatusSuccess, - Data: PrometheusData{ - ResultType: model.ValMatrix.String(), - Result: []SampleStream{}, - }, - }, nil + return NewEmptyPrometheusResponse(), nil } promResponses := make([]*PrometheusResponse, 0, len(responses)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go index 9462d4574a559..9b7b0e3baeaa7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go @@ -19,10 +19,10 @@ import ( "github.com/prometheus/common/model" "github.com/uber/jaeger-client-go" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -179,7 +179,7 @@ func NewResultsCacheMiddleware( } func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go index 38d3bb15c9d31..32d16b9217131 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go @@ -8,6 +8,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" + + "github.com/cortexproject/cortex/pkg/util" ) type RetryMiddlewareMetrics struct { @@ -68,7 +70,7 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { httpResp, ok := httpgrpc.HTTPResponseFromError(err) if !ok || httpResp.Code/100 == 5 { lastErr = err - level.Error(r.log).Log("msg", "error processing request", "try", tries, "err", err) + level.Error(util.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "err", err) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go index e890c218455df..053268d774d0c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go @@ -34,7 +34,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/tenant" ) const day = 24 * time.Hour @@ -126,6 +126,17 @@ func MergeMiddlewares(middleware ...Middleware) Middleware { }) } +// Tripperware is a signature for all http client-side middleware. 
+type Tripperware func(http.RoundTripper) http.RoundTripper + +// RoundTripFunc is to http.RoundTripper what http.HandlerFunc is to http.Handler. +type RoundTripFunc func(*http.Request) (*http.Response, error) + +// RoundTrip implements http.RoundTripper. +func (f RoundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return f(r) +} + // NewTripperware returns a Tripperware configured with middlewares to limit, align, split, retry and cache requests. func NewTripperware( cfg Config, @@ -138,7 +149,7 @@ func NewTripperware( minShardingLookback time.Duration, registerer prometheus.Registerer, cacheGenNumberLoader CacheGenNumberLoader, -) (frontend.Tripperware, cache.Cache, error) { +) (Tripperware, cache.Cache, error) { // Per tenant query metrics. queriesPerTenant := promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", @@ -149,7 +160,7 @@ func NewTripperware( // Metric used to keep track of each middleware execution duration. metrics := NewInstrumentMiddlewareMetrics(registerer) - queryRangeMiddleware := []Middleware{LimitsMiddleware(limits)} + queryRangeMiddleware := []Middleware{NewLimitsMiddleware(limits)} if cfg.AlignQueriesWithStep { queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("step_align", metrics), StepAlignMiddleware) } @@ -196,18 +207,18 @@ func NewTripperware( queryRangeMiddleware = append(queryRangeMiddleware, InstrumentMiddleware("retry", metrics), NewRetryMiddleware(log, cfg.MaxRetries, NewRetryMiddlewareMetrics(registerer))) } - return frontend.Tripperware(func(next http.RoundTripper) http.RoundTripper { + return func(next http.RoundTripper) http.RoundTripper { // Finally, if the user selected any query range middleware, stitch it in. if len(queryRangeMiddleware) > 0 { queryrange := NewRoundTripper(next, codec, queryRangeMiddleware...) - return frontend.RoundTripFunc(func(r *http.Request) (*http.Response, error) { + return RoundTripFunc(func(r *http.Request) (*http.Response, error) { isQueryRange := strings.HasSuffix(r.URL.Path, "/query_range") op := "query" if isQueryRange { op = "query_range" } - user, err := user.ExtractOrgID(r.Context()) + user, err := tenant.TenantID(r.Context()) // This should never happen anyways because we have auth middleware before this. if err != nil { return nil, err @@ -221,7 +232,7 @@ func NewTripperware( }) } return next - }), c, nil + }, c, nil } type roundTripper struct { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go new file mode 100644 index 0000000000000..ecbbe98f794f4 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go @@ -0,0 +1,71 @@ +package queryrange + +import ( + "context" + "net/http" + + "github.com/weaveworks/common/httpgrpc" + + "github.com/cortexproject/cortex/pkg/tenant" +) + +// RequestResponse contains a request response and the respective request that was used. +type RequestResponse struct { + Request Request + Response Response +} + +// DoRequests executes a list of requests in parallel. The limits parameters is used to limit parallelism per single request. +func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits Limits) ([]RequestResponse, error) { + userid, err := tenant.TenantID(ctx) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + + // If one of the requests fail, we want to be able to cancel the rest of them. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Feed all requests to a bounded intermediate channel to limit parallelism. + intermediate := make(chan Request) + go func() { + for _, req := range reqs { + intermediate <- req + } + close(intermediate) + }() + + respChan, errChan := make(chan RequestResponse), make(chan error) + parallelism := limits.MaxQueryParallelism(userid) + if parallelism > len(reqs) { + parallelism = len(reqs) + } + for i := 0; i < parallelism; i++ { + go func() { + for req := range intermediate { + resp, err := downstream.Do(ctx, req) + if err != nil { + errChan <- err + } else { + respChan <- RequestResponse{req, resp} + } + } + }() + } + + resps := make([]RequestResponse, 0, len(reqs)) + var firstErr error + for range reqs { + select { + case resp := <-respChan: + resps = append(resps, resp) + case err := <-errChan: + if firstErr == nil { + cancel() + firstErr = err + } + } + } + + return resps, firstErr +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go new file mode 100644 index 0000000000000..0de38e08833f8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go @@ -0,0 +1,62 @@ +package stats + +import ( + "context" + "sync/atomic" //lint:ignore faillint we can't use go.uber.org/atomic with a protobuf struct without wrapping it. + "time" + + "github.com/weaveworks/common/httpgrpc" +) + +type contextKey int + +var ctxKey = contextKey(0) + +// ContextWithEmptyStats returns a context with empty stats. +func ContextWithEmptyStats(ctx context.Context) (*Stats, context.Context) { + stats := &Stats{} + ctx = context.WithValue(ctx, ctxKey, stats) + return stats, ctx +} + +// FromContext gets the Stats out of the Context. Returns nil if stats have not +// been initialised in the context. +func FromContext(ctx context.Context) *Stats { + o := ctx.Value(ctxKey) + if o == nil { + return nil + } + return o.(*Stats) +} + +// AddWallTime adds some time to the counter. +func (s *Stats) AddWallTime(t time.Duration) { + if s == nil { + return + } + + atomic.AddInt64((*int64)(&s.WallTime), int64(t)) +} + +// LoadWallTime returns current wall time. +func (s *Stats) LoadWallTime() time.Duration { + if s == nil { + return 0 + } + + return time.Duration(atomic.LoadInt64((*int64)(&s.WallTime))) +} + +// Merge the provide Stats into this one. +func (s *Stats) Merge(other *Stats) { + if s == nil || other == nil { + return + } + + s.AddWallTime(other.LoadWallTime()) +} + +func ShouldTrackHTTPGRPCResponse(r *httpgrpc.HTTPResponse) bool { + // Do no track statistics for requests failed because of a server error. + return r.Code < 500 +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go new file mode 100644 index 0000000000000..b9ec9a49bad46 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.pb.go @@ -0,0 +1,414 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stats.proto + +package stats + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Stats struct { + // The sum of all wall time spent in the querier to execute the query. + WallTime time.Duration `protobuf:"bytes,1,opt,name=wall_time,json=wallTime,proto3,stdduration" json:"wall_time"` +} + +func (m *Stats) Reset() { *m = Stats{} } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { + return fileDescriptor_b4756a0aec8b9d44, []int{0} +} +func (m *Stats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Stats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Stats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Stats) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stats.Merge(m, src) +} +func (m *Stats) XXX_Size() int { + return m.Size() +} +func (m *Stats) XXX_DiscardUnknown() { + xxx_messageInfo_Stats.DiscardUnknown(m) +} + +var xxx_messageInfo_Stats proto.InternalMessageInfo + +func (m *Stats) GetWallTime() time.Duration { + if m != nil { + return m.WallTime + } + return 0 +} + +func init() { + proto.RegisterType((*Stats)(nil), "stats.Stats") +} + +func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } + +var fileDescriptor_b4756a0aec8b9d44 = []byte{ + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0x2e, 0x49, 0x2c, + 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x73, 0xa4, 0x74, 0xd3, 0x33, 0x4b, + 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xb2, 0x49, + 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0x74, 0x49, 0xc9, 0xa5, 0xe7, 0xe7, 0xa7, 0xe7, + 0xa4, 0x22, 0x54, 0xa5, 0x94, 0x16, 0x25, 0x96, 0x64, 0xe6, 0xe7, 0x41, 0xe4, 0x95, 0x3c, 0xb9, + 0x58, 0x83, 0x41, 0xe6, 0x0a, 0x39, 0x70, 0x71, 0x96, 0x27, 0xe6, 0xe4, 0xc4, 0x97, 0x64, 0xe6, + 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x49, 0xea, 0x41, 0x34, 0xeb, 0xc1, 0x34, 0xeb, + 0xb9, 0x40, 0x35, 0x3b, 0x71, 0x9c, 0xb8, 0x27, 0xcf, 0x30, 0xe3, 0xbe, 0x3c, 0x63, 0x10, 0x07, + 0x48, 0x57, 0x48, 0x66, 0x6e, 0xaa, 0x93, 0xf5, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, + 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, + 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, + 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, + 0x88, 0xb7, 0x92, 0xd8, 0xc0, 0x76, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xc4, 0x26, + 0x5d, 0xf3, 0x00, 0x00, 0x00, +} + +func (this *Stats) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Stats) + if !ok { + that2, ok := that.(Stats) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.WallTime != that1.WallTime { + return false + } + return true +} +func (this *Stats) GoString() string { + 
if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&stats.Stats{") + s = append(s, "WallTime: "+fmt.Sprintf("%#v", this.WallTime)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStats(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Stats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Stats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Stats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.WallTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.WallTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintStats(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintStats(dAtA []byte, offset int, v uint64) int { + offset -= sovStats(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Stats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.WallTime) + n += 1 + l + sovStats(uint64(l)) + return n +} + +func sovStats(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStats(x uint64) (n int) { + return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Stats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Stats{`, + `WallTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WallTime), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringStats(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Stats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Stats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WallTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStats + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.WallTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStats(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStats(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStats + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthStats + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStats(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthStats + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto new file mode 100644 index 0000000000000..3ec55448af7c2 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package stats; + +option go_package = "stats"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message Stats { + // The sum of all wall time spent in the querier to execute the query. 
+ google.protobuf.Duration wall_time = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go new file mode 100644 index 0000000000000..d91cae6b45e94 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go @@ -0,0 +1,29 @@ +package stats + +import ( + "net/http" + "time" +) + +// WallTimeMiddleware tracks the wall time. +type WallTimeMiddleware struct{} + +// NewWallTimeMiddleware makes a new WallTimeMiddleware. +func NewWallTimeMiddleware() WallTimeMiddleware { + return WallTimeMiddleware{} +} + +// Wrap implements middleware.Interface. +func (m WallTimeMiddleware) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + stats := FromContext(r.Context()) + if stats == nil { + next.ServeHTTP(w, r) + return + } + + startTime := time.Now() + next.ServeHTTP(w, r) + stats.AddWallTime(time.Since(startTime)) + }) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go new file mode 100644 index 0000000000000..89bd6967168e5 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go @@ -0,0 +1,139 @@ +package worker + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/weaveworks/common/httpgrpc" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" + "github.com/cortexproject/cortex/pkg/querier/stats" + querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/util" +) + +var ( + processorBackoffConfig = util.BackoffConfig{ + MinBackoff: 50 * time.Millisecond, + MaxBackoff: 1 * time.Second, + } +) + +func newFrontendProcessor(cfg Config, handler RequestHandler, log log.Logger) processor { + return &frontendProcessor{ + log: log, + handler: handler, + maxMessageSize: cfg.GRPCClientConfig.GRPC.MaxSendMsgSize, + querierID: cfg.QuerierID, + queryStatsEnabled: cfg.QueryStatsEnabled, + } +} + +// Handles incoming queries from frontend. +type frontendProcessor struct { + handler RequestHandler + maxMessageSize int + querierID string + queryStatsEnabled bool + + log log.Logger +} + +// runOne loops, trying to establish a stream to the frontend to begin request processing. +func (fp *frontendProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) { + client := frontendv1pb.NewFrontendClient(conn) + + backoff := util.NewBackoff(ctx, processorBackoffConfig) + for backoff.Ongoing() { + c, err := client.Process(ctx) + if err != nil { + level.Error(fp.log).Log("msg", "error contacting frontend", "address", address, "err", err) + backoff.Wait() + continue + } + + if err := fp.process(c); err != nil { + level.Error(fp.log).Log("msg", "error processing requests", "address", address, "err", err) + backoff.Wait() + continue + } + + backoff.Reset() + } +} + +// process loops processing requests on an established stream. +func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) error { + // Build a child context so we can cancel a query when the stream is closed. 
+ ctx, cancel := context.WithCancel(c.Context()) + defer cancel() + + for { + request, err := c.Recv() + if err != nil { + return err + } + + switch request.Type { + case frontendv1pb.HTTP_REQUEST: + // Handle the request on a "background" goroutine, so we go back to + // blocking on c.Recv(). This allows us to detect the stream closing + // and cancel the query. We don't actually handle queries in parallel + // here, as we're running in lock step with the server - each Recv is + // paired with a Send. + go fp.runRequest(ctx, request.HttpRequest, func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error { + return c.Send(&frontendv1pb.ClientToFrontend{ + HttpResponse: response, + Stats: stats, + }) + }) + + case frontendv1pb.GET_ID: + err := c.Send(&frontendv1pb.ClientToFrontend{ClientID: fp.querierID}) + if err != nil { + return err + } + + default: + return fmt.Errorf("unknown request type: %v", request.Type) + } + } +} + +func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, sendHTTPResponse func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error) { + var stats *querier_stats.Stats + if fp.queryStatsEnabled { + stats, ctx = querier_stats.ContextWithEmptyStats(ctx) + } + + response, err := fp.handler.Handle(ctx, request) + if err != nil { + var ok bool + response, ok = httpgrpc.HTTPResponseFromError(err) + if !ok { + response = &httpgrpc.HTTPResponse{ + Code: http.StatusInternalServerError, + Body: []byte(err.Error()), + } + } + } + + // Ensure responses that are too big are not retried. + if len(response.Body) >= fp.maxMessageSize { + errMsg := fmt.Sprintf("response larger than the max (%d vs %d)", len(response.Body), fp.maxMessageSize) + response = &httpgrpc.HTTPResponse{ + Code: http.StatusRequestEntityTooLarge, + Body: []byte(errMsg), + } + level.Error(fp.log).Log("msg", "error processing query", "err", errMsg) + } + + if err := sendHTTPResponse(response, stats); err != nil { + level.Error(fp.log).Log("msg", "error processing requests", "err", err) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go new file mode 100644 index 0000000000000..8a68c310e205a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/processor_manager.go @@ -0,0 +1,75 @@ +package worker + +import ( + "context" + "sync" + + "go.uber.org/atomic" + "google.golang.org/grpc" +) + +// Manages processor goroutines for single grpc connection. +type processorManager struct { + p processor + conn *grpc.ClientConn + address string + + // Main context to control all goroutines. + ctx context.Context + wg sync.WaitGroup + + // Cancel functions for individual goroutines. + cancelsMu sync.Mutex + cancels []context.CancelFunc + + currentProcessors *atomic.Int32 +} + +func newProcessorManager(ctx context.Context, p processor, conn *grpc.ClientConn, address string) *processorManager { + return &processorManager{ + p: p, + ctx: ctx, + conn: conn, + address: address, + currentProcessors: atomic.NewInt32(0), + } +} + +func (pm *processorManager) stop() { + // Stop all goroutines. + pm.concurrency(0) + + // Wait until they finish. 
+ pm.wg.Wait() + + _ = pm.conn.Close() +} + +func (pm *processorManager) concurrency(n int) { + pm.cancelsMu.Lock() + defer pm.cancelsMu.Unlock() + + if n < 0 { + n = 0 + } + + for len(pm.cancels) < n { + ctx, cancel := context.WithCancel(pm.ctx) + pm.cancels = append(pm.cancels, cancel) + + pm.wg.Add(1) + go func() { + defer pm.wg.Done() + + pm.currentProcessors.Inc() + defer pm.currentProcessors.Dec() + + pm.p.processQueriesOnSingleStream(ctx, pm.conn, pm.address) + }() + } + + for len(pm.cancels) > n { + pm.cancels[0]() + pm.cancels = pm.cancels[1:] + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go new file mode 100644 index 0000000000000..c92da21f70438 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go @@ -0,0 +1,219 @@ +package worker + +import ( + "context" + "fmt" + "net/http" + "time" + + otgrpc "github.com/opentracing-contrib/go-grpc" + "github.com/weaveworks/common/user" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/middleware" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" + querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/ring/client" + "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/grpcutil" + cortex_middleware "github.com/cortexproject/cortex/pkg/util/middleware" + "github.com/cortexproject/cortex/pkg/util/services" +) + +func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer) (*schedulerProcessor, []services.Service) { + p := &schedulerProcessor{ + log: log, + handler: handler, + maxMessageSize: cfg.GRPCClientConfig.GRPC.MaxSendMsgSize, + querierID: cfg.QuerierID, + grpcConfig: cfg.GRPCClientConfig, + queryStatsEnabled: cfg.QueryStatsEnabled, + + frontendClientRequestDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Name: "cortex_querier_query_frontend_request_duration_seconds", + Help: "Time spend doing requests to frontend.", + Buckets: prometheus.ExponentialBuckets(0.001, 4, 6), + }, []string{"operation", "status_code"}), + } + + frontendClientsGauge := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_querier_query_frontend_clients", + Help: "The current number of clients connected to query-frontend.", + }) + + poolConfig := client.PoolConfig{ + CheckInterval: 5 * time.Second, + HealthCheckEnabled: true, + HealthCheckTimeout: 1 * time.Second, + } + + p.frontendPool = client.NewPool("frontend", poolConfig, nil, p.createFrontendClient, frontendClientsGauge, log) + return p, []services.Service{p.frontendPool} +} + +// Handles incoming queries from query-scheduler. 
+type schedulerProcessor struct { + log log.Logger + handler RequestHandler + grpcConfig grpcclient.ConfigWithTLS + maxMessageSize int + querierID string + queryStatsEnabled bool + + frontendPool *client.Pool + frontendClientRequestDuration *prometheus.HistogramVec +} + +func (sp *schedulerProcessor) processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) { + schedulerClient := schedulerpb.NewSchedulerForQuerierClient(conn) + + backoff := util.NewBackoff(ctx, processorBackoffConfig) + for backoff.Ongoing() { + c, err := schedulerClient.QuerierLoop(ctx) + if err == nil { + err = c.Send(&schedulerpb.QuerierToScheduler{QuerierID: sp.querierID}) + } + + if err != nil { + level.Error(sp.log).Log("msg", "error contacting scheduler", "err", err, "addr", address) + backoff.Wait() + continue + } + + if err := sp.querierLoop(c, address); err != nil { + level.Error(sp.log).Log("msg", "error processing requests from scheduler", "err", err, "addr", address) + backoff.Wait() + continue + } + + backoff.Reset() + } +} + +// process loops processing requests on an established stream. +func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_QuerierLoopClient, address string) error { + // Build a child context so we can cancel a query when the stream is closed. + ctx, cancel := context.WithCancel(c.Context()) + defer cancel() + + for { + request, err := c.Recv() + if err != nil { + return err + } + + // Handle the request on a "background" goroutine, so we go back to + // blocking on c.Recv(). This allows us to detect the stream closing + // and cancel the query. We don't actually handle queries in parallel + // here, as we're running in lock step with the server - each Recv is + // paired with a Send. + go func() { + // We need to inject user into context for sending response back. + ctx := user.InjectOrgID(ctx, request.UserID) + + tracer := opentracing.GlobalTracer() + // Ignore errors here. If we cannot get parent span, we just don't create new one. + parentSpanContext, _ := grpcutil.GetParentSpanForRequest(tracer, request.HttpRequest) + if parentSpanContext != nil { + queueSpan, spanCtx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "querier_processor_runRequest", opentracing.ChildOf(parentSpanContext)) + defer queueSpan.Finish() + + ctx = spanCtx + } + logger := util.WithContext(ctx, sp.log) + + sp.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.HttpRequest) + + // Report back to scheduler that processing of the query has finished. + if err := c.Send(&schedulerpb.QuerierToScheduler{}); err != nil { + level.Error(logger).Log("msg", "error notifying scheduler about finished query", "err", err, "addr", address) + } + }() + } +} + +func (sp *schedulerProcessor) runRequest(ctx context.Context, logger log.Logger, queryID uint64, frontendAddress string, request *httpgrpc.HTTPRequest) { + var stats *querier_stats.Stats + if sp.queryStatsEnabled { + stats, ctx = querier_stats.ContextWithEmptyStats(ctx) + } + + response, err := sp.handler.Handle(ctx, request) + if err != nil { + var ok bool + response, ok = httpgrpc.HTTPResponseFromError(err) + if !ok { + response = &httpgrpc.HTTPResponse{ + Code: http.StatusInternalServerError, + Body: []byte(err.Error()), + } + } + } + + // Ensure responses that are too big are not retried. 
+ if len(response.Body) >= sp.maxMessageSize { + level.Error(logger).Log("msg", "response larger than max message size", "size", len(response.Body), "maxMessageSize", sp.maxMessageSize) + + errMsg := fmt.Sprintf("response larger than the max message size (%d vs %d)", len(response.Body), sp.maxMessageSize) + response = &httpgrpc.HTTPResponse{ + Code: http.StatusRequestEntityTooLarge, + Body: []byte(errMsg), + } + } + + c, err := sp.frontendPool.GetClientFor(frontendAddress) + if err == nil { + // Response is empty and uninteresting. + _, err = c.(frontendv2pb.FrontendForQuerierClient).QueryResult(ctx, &frontendv2pb.QueryResultRequest{ + QueryID: queryID, + HttpResponse: response, + Stats: stats, + }) + } + if err != nil { + level.Error(logger).Log("msg", "error notifying frontend about finished query", "err", err, "frontend", frontendAddress) + } +} + +func (sp *schedulerProcessor) createFrontendClient(addr string) (client.PoolClient, error) { + opts, err := sp.grpcConfig.DialOption([]grpc.UnaryClientInterceptor{ + otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), + middleware.ClientUserHeaderInterceptor, + cortex_middleware.PrometheusGRPCUnaryInstrumentation(sp.frontendClientRequestDuration), + }, nil) + + if err != nil { + return nil, err + } + + conn, err := grpc.Dial(addr, opts...) + if err != nil { + return nil, err + } + + return &frontendClient{ + FrontendForQuerierClient: frontendv2pb.NewFrontendForQuerierClient(conn), + HealthClient: grpc_health_v1.NewHealthClient(conn), + conn: conn, + }, nil +} + +type frontendClient struct { + frontendv2pb.FrontendForQuerierClient + grpc_health_v1.HealthClient + conn *grpc.ClientConn +} + +func (fc *frontendClient) Close() error { + return fc.conn.Close() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go new file mode 100644 index 0000000000000..ac47c11a55f69 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go @@ -0,0 +1,269 @@ +package worker + +import ( + "context" + "flag" + "os" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/httpgrpc" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/services" +) + +type Config struct { + FrontendAddress string `yaml:"frontend_address"` + SchedulerAddress string `yaml:"scheduler_address"` + DNSLookupPeriod time.Duration `yaml:"dns_lookup_duration"` + + Parallelism int `yaml:"parallelism"` + MatchMaxConcurrency bool `yaml:"match_max_concurrent"` + MaxConcurrentRequests int `yaml:"-"` // Must be same as passed to PromQL Engine. + + QuerierID string `yaml:"id"` + + GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` + + // The following config is injected internally. + QueryStatsEnabled bool `yaml:"-"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.SchedulerAddress, "querier.scheduler-address", "", "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. Only one of -querier.frontend-address or -querier.scheduler-address can be set. 
If neither is set, queries are only received via HTTP endpoint.") + f.StringVar(&cfg.FrontendAddress, "querier.frontend-address", "", "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.") + + f.DurationVar(&cfg.DNSLookupPeriod, "querier.dns-lookup-period", 10*time.Second, "How often to query DNS for query-frontend or query-scheduler address.") + + f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process per query-frontend or query-scheduler.") + f.BoolVar(&cfg.MatchMaxConcurrency, "querier.worker-match-max-concurrent", false, "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.") + f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.") + + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) +} + +func (cfg *Config) Validate(log log.Logger) error { + if cfg.FrontendAddress != "" && cfg.SchedulerAddress != "" { + return errors.New("frontend address and scheduler address are mutually exclusive, please use only one") + } + return cfg.GRPCClientConfig.Validate(log) +} + +// Handler for HTTP requests wrapped in protobuf messages. +type RequestHandler interface { + Handle(context.Context, *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) +} + +// Single processor handles all streaming operations to query-frontend or query-scheduler to fetch queries +// and process them. +type processor interface { + // Each invocation of processQueriesOnSingleStream starts new streaming operation to query-frontend + // or query-scheduler to fetch queries and execute them. + // + // This method must react on context being finished, and stop when that happens. + // + // processorManager (not processor) is responsible for starting as many goroutines as needed for each connection. + processQueriesOnSingleStream(ctx context.Context, conn *grpc.ClientConn, address string) +} + +type querierWorker struct { + *services.BasicService + + cfg Config + log log.Logger + + processor processor + + subservices *services.Manager + + mu sync.Mutex + // Set to nil when stop is called... no more managers are created afterwards. 
+ managers map[string]*processorManager +} + +func NewQuerierWorker(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer) (services.Service, error) { + if cfg.QuerierID == "" { + hostname, err := os.Hostname() + if err != nil { + return nil, errors.Wrap(err, "failed to get hostname for configuring querier ID") + } + cfg.QuerierID = hostname + } + + var processor processor + var servs []services.Service + var address string + + switch { + case cfg.SchedulerAddress != "": + level.Info(log).Log("msg", "Starting querier worker connected to query-scheduler", "scheduler", cfg.SchedulerAddress) + + address = cfg.SchedulerAddress + processor, servs = newSchedulerProcessor(cfg, handler, log, reg) + + case cfg.FrontendAddress != "": + level.Info(log).Log("msg", "Starting querier worker connected to query-frontend", "frontend", cfg.FrontendAddress) + + address = cfg.FrontendAddress + processor = newFrontendProcessor(cfg, handler, log) + + default: + return nil, errors.New("no query-scheduler or query-frontend address") + } + + return newQuerierWorkerWithProcessor(cfg, log, processor, address, servs) +} + +func newQuerierWorkerWithProcessor(cfg Config, log log.Logger, processor processor, address string, servs []services.Service) (*querierWorker, error) { + f := &querierWorker{ + cfg: cfg, + log: log, + managers: map[string]*processorManager{}, + processor: processor, + } + + // Empty address is only used in tests, where individual targets are added manually. + if address != "" { + w, err := util.NewDNSWatcher(address, cfg.DNSLookupPeriod, f) + if err != nil { + return nil, err + } + + servs = append(servs, w) + } + + if len(servs) > 0 { + subservices, err := services.NewManager(servs...) + if err != nil { + return nil, errors.Wrap(err, "querier worker subservices") + } + + f.subservices = subservices + } + + f.BasicService = services.NewIdleService(f.starting, f.stopping) + return f, nil +} + +func (w *querierWorker) starting(ctx context.Context) error { + if w.subservices == nil { + return nil + } + return services.StartManagerAndAwaitHealthy(ctx, w.subservices) +} + +func (w *querierWorker) stopping(_ error) error { + // Stop all goroutines fetching queries. Note that in Stopping state, + // worker no longer creates new managers in AddressAdded method. + w.mu.Lock() + for _, m := range w.managers { + m.stop() + } + w.mu.Unlock() + + if w.subservices == nil { + return nil + } + + // Stop DNS watcher and services used by processor. + return services.StopManagerAndAwaitStopped(context.Background(), w.subservices) +} + +func (w *querierWorker) AddressAdded(address string) { + ctx := w.ServiceContext() + if ctx == nil || ctx.Err() != nil { + return + } + + w.mu.Lock() + defer w.mu.Unlock() + + if m := w.managers[address]; m != nil { + return + } + + level.Info(w.log).Log("msg", "adding connection", "addr", address) + conn, err := w.connect(context.Background(), address) + if err != nil { + level.Error(w.log).Log("msg", "error connecting", "addr", address, "err", err) + return + } + + w.managers[address] = newProcessorManager(ctx, w.processor, conn, address) + // Called with lock. + w.resetConcurrency() +} + +func (w *querierWorker) AddressRemoved(address string) { + level.Info(w.log).Log("msg", "removing connection", "addr", address) + + w.mu.Lock() + p := w.managers[address] + delete(w.managers, address) + w.mu.Unlock() + + if p != nil { + p.stop() + } +} + +// Must be called with lock. 
+func (w *querierWorker) resetConcurrency() { + totalConcurrency := 0 + index := 0 + + for _, m := range w.managers { + concurrency := 0 + + if w.cfg.MatchMaxConcurrency { + concurrency = w.cfg.MaxConcurrentRequests / len(w.managers) + + // If max concurrency does not evenly divide into our frontends a subset will be chosen + // to receive an extra connection. Frontend addresses were shuffled above so this will be a + // random selection of frontends. + if index < w.cfg.MaxConcurrentRequests%len(w.managers) { + level.Warn(w.log).Log("msg", "max concurrency is not evenly divisible across targets, adding an extra connection", "addr", m.address) + concurrency++ + } + } else { + concurrency = w.cfg.Parallelism + } + + // If concurrency is 0 then MaxConcurrentRequests is less than the total number of + // frontends/schedulers. In order to prevent accidentally starving a frontend or scheduler we are just going to + // always connect once to every target. This is dangerous b/c we may start exceeding PromQL + // max concurrency. + if concurrency == 0 { + concurrency = 1 + } + + totalConcurrency += concurrency + m.concurrency(concurrency) + index++ + } + + if totalConcurrency > w.cfg.MaxConcurrentRequests { + level.Warn(w.log).Log("msg", "total worker concurrency is greater than promql max concurrency. Queries may be queued in the querier which reduces QOS") + } +} + +func (w *querierWorker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) { + // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. + opts, err := w.cfg.GRPCClientConfig.DialOption(nil, nil) + if err != nil { + return nil, err + } + + conn, err := grpc.DialContext(ctx, address, opts...) + if err != nil { + return nil, err + } + return conn, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go index 93958920fa8e9..e0ab7ce64b93f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go @@ -6,7 +6,7 @@ import ( func NewRingServiceDiscovery(r ring.ReadRing) PoolServiceDiscovery { return func() ([]string, error) { - replicationSet, err := r.GetAll(ring.Read) + replicationSet, err := r.GetAllHealthy(ring.Reporting) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index a9e2f344cf063..cc03bf0fbaa5c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -47,21 +47,21 @@ type LifecyclerConfig struct { RingConfig Config `yaml:"ring"` // Config for the ingester lifecycle control - NumTokens int `yaml:"num_tokens"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - ObservePeriod time.Duration `yaml:"observe_period"` - JoinAfter time.Duration `yaml:"join_after"` - MinReadyDuration time.Duration `yaml:"min_ready_duration"` - InfNames []string `yaml:"interface_names"` - FinalSleep time.Duration `yaml:"final_sleep"` - TokensFilePath string `yaml:"tokens_file_path"` - Zone string `yaml:"availability_zone"` + NumTokens int `yaml:"num_tokens"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + ObservePeriod time.Duration `yaml:"observe_period"` + JoinAfter time.Duration 
`yaml:"join_after"` + MinReadyDuration time.Duration `yaml:"min_ready_duration"` + InfNames []string `yaml:"interface_names"` + FinalSleep time.Duration `yaml:"final_sleep"` + TokensFilePath string `yaml:"tokens_file_path"` + Zone string `yaml:"availability_zone"` + UnregisterOnShutdown bool `yaml:"unregister_on_shutdown"` // For testing, you can override the address and ID of this ingester - Addr string `yaml:"address" doc:"hidden"` - Port int `doc:"hidden"` - ID string `doc:"hidden"` - SkipUnregister bool `yaml:"-"` + Addr string `yaml:"address" doc:"hidden"` + Port int `doc:"hidden"` + ID string `doc:"hidden"` // Injected internally ListenPort int `yaml:"-"` @@ -102,6 +102,7 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag f.IntVar(&cfg.Port, prefix+"lifecycler.port", 0, "port to advertise in consul (defaults to server.grpc-listen-port).") f.StringVar(&cfg.ID, prefix+"lifecycler.ID", hostname, "ID to register in the ring.") f.StringVar(&cfg.Zone, prefix+"availability-zone", "", "The availability zone where this instance is running.") + f.BoolVar(&cfg.UnregisterOnShutdown, prefix+"unregister-on-shutdown", true, "Unregister from the ring upon clean shutdown. It can be useful to disable for rolling restarts with consistent naming in conjunction with -distributor.extend-writes=false.") } // Lifecycler is responsible for managing the lifecycle of entries in the ring. @@ -122,7 +123,8 @@ type Lifecycler struct { Zone string // Whether to flush if transfer fails on shutdown. - flushOnShutdown *atomic.Bool + flushOnShutdown *atomic.Bool + unregisterOnShutdown *atomic.Bool // We need to remember the ingester state, tokens and registered timestamp just in case the KV store // goes away and comes back empty. The state changes during lifecycle of instance. @@ -176,12 +178,13 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa flushTransferer: flushTransferer, KVStore: store, - Addr: fmt.Sprintf("%s:%d", addr, port), - ID: cfg.ID, - RingName: ringName, - RingKey: ringKey, - flushOnShutdown: atomic.NewBool(flushOnShutdown), - Zone: zone, + Addr: fmt.Sprintf("%s:%d", addr, port), + ID: cfg.ID, + RingName: ringName, + RingKey: ringKey, + flushOnShutdown: atomic.NewBool(flushOnShutdown), + unregisterOnShutdown: atomic.NewBool(cfg.UnregisterOnShutdown), + Zone: zone, actorChan: make(chan func()), @@ -489,7 +492,7 @@ heartbeatLoop: } } - if !i.cfg.SkipUnregister { + if i.ShouldUnregisterOnShutdown() { if err := i.unregister(context.Background()); err != nil { return perrors.Wrapf(err, "failed to unregister from the KV store, ring: %s", i.RingName) } @@ -778,6 +781,16 @@ func (i *Lifecycler) SetFlushOnShutdown(flushOnShutdown bool) { i.flushOnShutdown.Store(flushOnShutdown) } +// ShouldUnregisterOnShutdown returns if unregistering should be skipped on shutdown. +func (i *Lifecycler) ShouldUnregisterOnShutdown() bool { + return i.unregisterOnShutdown.Load() +} + +// SetUnregisterOnShutdown enables/disables unregistering on shutdown. 
+func (i *Lifecycler) SetUnregisterOnShutdown(enabled bool) { + i.unregisterOnShutdown.Store(enabled) +} + func (i *Lifecycler) processShutdown(ctx context.Context) { flushRequired := i.flushOnShutdown.Load() transferStart := time.Now() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go index 19ab91c9e539f..d9ebd78155c21 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go @@ -165,6 +165,9 @@ func (i *IngesterDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration) b case Ruler: healthy = i.State == ACTIVE + + case Compactor: + healthy = i.State == ACTIVE } return healthy && time.Since(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go index 3b15541f3ad55..adc619e85cbfe 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go @@ -10,25 +10,45 @@ import ( // many errors to tolerate. type ReplicationSet struct { Ingesters []IngesterDesc + + // Maximum number of tolerated failing instances. Max errors and max unavailable zones are + // mutually exclusive. MaxErrors int + + // Maximum number of different zones in which instances can fail. Max unavailable zones and + // max errors are mutually exclusive. + MaxUnavailableZones int } // Do function f in parallel for all replicas in the set, erroring is we exceed // MaxErrors and returning early otherwise. func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *IngesterDesc) (interface{}, error)) ([]interface{}, error) { + type instanceResult struct { + res interface{} + err error + instance *IngesterDesc + } + + // Initialise the result tracker, which is use to keep track of successes and failures. + var tracker replicationSetResultTracker + if r.MaxUnavailableZones > 0 { + tracker = newZoneAwareResultTracker(r.Ingesters, r.MaxUnavailableZones) + } else { + tracker = newDefaultResultTracker(r.Ingesters, r.MaxErrors) + } + var ( - errs = make(chan error, len(r.Ingesters)) - resultsChan = make(chan interface{}, len(r.Ingesters)) - minSuccess = len(r.Ingesters) - r.MaxErrors - forceStart = make(chan struct{}, r.MaxErrors) + ch = make(chan instanceResult, len(r.Ingesters)) + forceStart = make(chan struct{}, r.MaxErrors) ) ctx, cancel := context.WithCancel(ctx) defer cancel() + // Spawn a goroutine for each instance. for i := range r.Ingesters { go func(i int, ing *IngesterDesc) { - // wait to send extra requests - if i >= minSuccess && delay > 0 { + // Wait to send extra requests. Works only when zone-awareness is disabled. 
+ if delay > 0 && r.MaxUnavailableZones == 0 && i >= len(r.Ingesters)-r.MaxErrors { after := time.NewTimer(delay) defer after.Stop() select { @@ -39,32 +59,32 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont } } result, err := f(ctx, ing) - if err != nil { - errs <- err - } else { - resultsChan <- result + ch <- instanceResult{ + res: result, + err: err, + instance: ing, } }(i, &r.Ingesters[i]) } - var ( - numErrs int - numSuccess int - results = make([]interface{}, 0, len(r.Ingesters)) - ) - for numSuccess < minSuccess { + results := make([]interface{}, 0, len(r.Ingesters)) + + for !tracker.succeeded() { select { - case err := <-errs: - numErrs++ - if numErrs > r.MaxErrors { - return nil, err - } - // force one of the delayed requests to start - forceStart <- struct{}{} + case res := <-ch: + tracker.done(res.instance, res.err) + if res.err != nil { + if tracker.failed() { + return nil, res.err + } - case result := <-resultsChan: - numSuccess++ - results = append(results, result) + // force one of the delayed requests to start + if delay > 0 && r.MaxUnavailableZones == 0 { + forceStart <- struct{}{} + } + } else { + results = append(results, res.res) + } case <-ctx.Done(): return nil, ctx.Err() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go new file mode 100644 index 0000000000000..09f12e3cebbf6 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go @@ -0,0 +1,96 @@ +package ring + +type replicationSetResultTracker interface { + // Signals an instance has done the execution, either successful (no error) + // or failed (with error). + done(instance *IngesterDesc, err error) + + // Returns true if the minimum number of successful results have been received. + succeeded() bool + + // Returns true if the maximum number of failed executions have been reached. + failed() bool +} + +type defaultResultTracker struct { + minSucceeded int + numSucceeded int + numErrors int + maxErrors int +} + +func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultResultTracker { + return &defaultResultTracker{ + minSucceeded: len(instances) - maxErrors, + numSucceeded: 0, + numErrors: 0, + maxErrors: maxErrors, + } +} + +func (t *defaultResultTracker) done(_ *IngesterDesc, err error) { + if err == nil { + t.numSucceeded++ + } else { + t.numErrors++ + } +} + +func (t *defaultResultTracker) succeeded() bool { + return t.numSucceeded >= t.minSucceeded +} + +func (t *defaultResultTracker) failed() bool { + return t.numErrors > t.maxErrors +} + +// zoneAwareResultTracker tracks the results per zone. +// All instances in a zone must succeed in order for the zone to succeed. 
+type zoneAwareResultTracker struct { + waitingByZone map[string]int + failuresByZone map[string]int + minSuccessfulZones int + maxUnavailableZones int +} + +func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int) *zoneAwareResultTracker { + t := &zoneAwareResultTracker{ + waitingByZone: make(map[string]int), + failuresByZone: make(map[string]int), + maxUnavailableZones: maxUnavailableZones, + } + + for _, instance := range instances { + t.waitingByZone[instance.Zone]++ + } + t.minSuccessfulZones = len(t.waitingByZone) - maxUnavailableZones + + return t +} + +func (t *zoneAwareResultTracker) done(instance *IngesterDesc, err error) { + t.waitingByZone[instance.Zone]-- + + if err != nil { + t.failuresByZone[instance.Zone]++ + } +} + +func (t *zoneAwareResultTracker) succeeded() bool { + successfulZones := 0 + + // The execution succeeded once we successfully received a successful result + // from "all zones - max unavailable zones". + for zone, numWaiting := range t.waitingByZone { + if numWaiting == 0 && t.failuresByZone[zone] == 0 { + successfulZones++ + } + } + + return successfulZones >= t.minSuccessfulZones +} + +func (t *zoneAwareResultTracker) failed() bool { + failedZones := len(t.failuresByZone) + return failedZones > t.maxUnavailableZones +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go index 67d96c30ed444..3490853dce4a3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go @@ -17,7 +17,15 @@ type ReplicationStrategy interface { ShouldExtendReplicaSet(instance IngesterDesc, op Operation) bool } -type DefaultReplicationStrategy struct{} +type defaultReplicationStrategy struct { + ExtendWrites bool +} + +func NewDefaultReplicationStrategy(extendWrites bool) ReplicationStrategy { + return &defaultReplicationStrategy{ + ExtendWrites: extendWrites, + } +} // Filter decides, given the set of ingesters eligible for a key, // which ingesters you will try and write to and how many failures you will @@ -25,7 +33,7 @@ type DefaultReplicationStrategy struct{} // - Filters out dead ingesters so the one doesn't even try to write to them. // - Checks there is enough ingesters for an operation to succeed. // The ingesters argument may be overwritten. -func (s *DefaultReplicationStrategy) Filter(ingesters []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]IngesterDesc, int, error) { +func (s *defaultReplicationStrategy) Filter(ingesters []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]IngesterDesc, int, error) { // We need a response from a quorum of ingesters, which is n/2 + 1. In the // case of a node joining/leaving, the actual replica set might be bigger // than the replication factor, so use the bigger or the two. @@ -63,15 +71,18 @@ func (s *DefaultReplicationStrategy) Filter(ingesters []IngesterDesc, op Operati return ingesters, len(ingesters) - minSuccess, nil } -func (s *DefaultReplicationStrategy) ShouldExtendReplicaSet(ingester IngesterDesc, op Operation) bool { +func (s *defaultReplicationStrategy) ShouldExtendReplicaSet(ingester IngesterDesc, op Operation) bool { // We do not want to Write to Ingesters that are not ACTIVE, but we do want // to write the extra replica somewhere. 
So we increase the size of the set // of replicas for the key. This means we have to also increase the // size of the replica set for read, but we can read from Leaving ingesters, // so don't skip it in this case. - // NB dead ingester will be filtered later by DefaultReplicationStrategy.Filter(). - if op == Write && ingester.State != ACTIVE { - return true + // NB dead ingester will be filtered later by defaultReplicationStrategy.Filter(). + if op == Write { + if s.ExtendWrites { + return ingester.State != ACTIVE + } + return false } else if op == Read && (ingester.State != ACTIVE && ingester.State != LEAVING) { return true } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index 7192399d99093..2cdd18948a5db 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -44,7 +44,18 @@ type ReadRing interface { // buf is a slice to be overwritten for the return value // to avoid memory allocation; can be nil. Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet, error) - GetAll(op Operation) (ReplicationSet, error) + + // GetAllHealthy returns all healthy instances in the ring, for the given operation. + // This function doesn't check if the quorum is honored, so doesn't fail if the number + // of unhealthy ingesters is greater than the tolerated max unavailable. + GetAllHealthy(op Operation) (ReplicationSet, error) + + // GetReplicationSetForOperation returns all instances where the input operation should be executed. + // The resulting ReplicationSet doesn't necessarily contains all healthy instances + // in the ring, but could contain the minimum set of instances required to execute + // the input operation. + GetReplicationSetForOperation(op Operation) (ReplicationSet, error) + ReplicationFactor() int IngesterCount() int @@ -75,7 +86,11 @@ const ( // BlocksRead is the operation run by the querier to query blocks via the store-gateway. BlocksRead - Ruler // Used for distributing rule groups between rulers. + // Ruler is the operation used for distributing rule groups between rulers. + Ruler + + // Compactor is the operation used for distributing tenants/blocks across compactors. + Compactor ) var ( @@ -85,6 +100,10 @@ var ( // ErrInstanceNotFound is the error returned when trying to get information for an instance // not registered within the ring. ErrInstanceNotFound = errors.New("instance not found in the ring") + + // ErrTooManyFailedIngesters is the error returned when there are too many failed ingesters for a + // specific operation. 
+ ErrTooManyFailedIngesters = errors.New("too many failed ingesters") ) // Config for a Ring @@ -93,6 +112,7 @@ type Config struct { HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` ReplicationFactor int `yaml:"replication_factor"` ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` + ExtendWrites bool `yaml:"extend_writes"` } // RegisterFlags adds the flags required to config this to the given FlagSet with a specified prefix @@ -107,6 +127,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.") f.IntVar(&cfg.ReplicationFactor, prefix+"distributor.replication-factor", 3, "The number of ingesters to write to and read from.") f.BoolVar(&cfg.ZoneAwarenessEnabled, prefix+"distributor.zone-awareness-enabled", false, "True to enable the zone-awareness and replicate ingested samples across different availability zones.") + f.BoolVar(&cfg.ExtendWrites, prefix+"distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") } // Ring holds the information about the members of the consistent hash ring. @@ -159,7 +180,7 @@ func New(cfg Config, name, key string, reg prometheus.Registerer) (*Ring, error) return nil, err } - return NewWithStoreClientAndStrategy(cfg, name, key, store, &DefaultReplicationStrategy{}) + return NewWithStoreClientAndStrategy(cfg, name, key, store, NewDefaultReplicationStrategy(cfg.ExtendWrites)) } func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client, strategy ReplicationStrategy) (*Ring, error) { @@ -313,24 +334,15 @@ func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet }, nil } -// GetAll returns all available ingesters in the ring. -func (r *Ring) GetAll(op Operation) (ReplicationSet, error) { +// GetAllHealthy implements ReadRing. +func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) { r.mtx.RLock() defer r.mtx.RUnlock() - if r.ringDesc == nil || len(r.ringTokens) == 0 { + if r.ringDesc == nil || len(r.ringDesc.Ingesters) == 0 { return ReplicationSet{}, ErrEmptyRing } - // Calculate the number of required ingesters; - // ensure we always require at least RF-1 when RF=3. - numRequired := len(r.ringDesc.Ingesters) - if numRequired < r.cfg.ReplicationFactor { - numRequired = r.cfg.ReplicationFactor - } - maxUnavailable := r.cfg.ReplicationFactor / 2 - numRequired -= maxUnavailable - ingesters := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) for _, ingester := range r.ringDesc.Ingesters { if r.IsHealthy(&ingester, op) { @@ -338,13 +350,89 @@ func (r *Ring) GetAll(op Operation) (ReplicationSet, error) { } } - if len(ingesters) < numRequired { - return ReplicationSet{}, fmt.Errorf("too many failed ingesters") + return ReplicationSet{ + Ingesters: ingesters, + MaxErrors: 0, + }, nil +} + +// GetReplicationSetForOperation implements ReadRing. +func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.ringDesc == nil || len(r.ringTokens) == 0 { + return ReplicationSet{}, ErrEmptyRing + } + + // Build the initial replication set, excluding unhealthy instances. 
+ healthyInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) + zoneFailures := make(map[string]struct{}) + for _, ingester := range r.ringDesc.Ingesters { + if r.IsHealthy(&ingester, op) { + healthyInstances = append(healthyInstances, ingester) + } else { + zoneFailures[ingester.Zone] = struct{}{} + } + } + + // Max errors and max unavailable zones are mutually exclusive. We initialise both + // to 0 and then we update them whether zone-awareness is enabled or not. + maxErrors := 0 + maxUnavailableZones := 0 + + if r.cfg.ZoneAwarenessEnabled { + // Given data is replicated to RF different zones, we can tolerate a number of + // RF/2 failing zones. However, we need to protect from the case the ring currently + // contains instances in a number of zones < RF. + numReplicatedZones := util.Min(len(r.ringZones), r.cfg.ReplicationFactor) + minSuccessZones := (numReplicatedZones / 2) + 1 + maxUnavailableZones = minSuccessZones - 1 + + if len(zoneFailures) > maxUnavailableZones { + return ReplicationSet{}, ErrTooManyFailedIngesters + } + + if len(zoneFailures) > 0 { + // We remove all instances (even healthy ones) from zones with at least + // 1 failing ingester. Due to how replication works when zone-awareness is + // enabled (data is replicated to RF different zones), there's no benefit in + // querying healthy instances from "failing zones". A zone is considered + // failed if there is single error. + filteredInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) + for _, ingester := range healthyInstances { + if _, ok := zoneFailures[ingester.Zone]; !ok { + filteredInstances = append(filteredInstances, ingester) + } + } + + healthyInstances = filteredInstances + } + + // Since we removed all instances from zones containing at least 1 failing + // instance, we have to decrease the max unavailable zones accordingly. + maxUnavailableZones -= len(zoneFailures) + } else { + // Calculate the number of required ingesters; + // ensure we always require at least RF-1 when RF=3. + numRequired := len(r.ringDesc.Ingesters) + if numRequired < r.cfg.ReplicationFactor { + numRequired = r.cfg.ReplicationFactor + } + // We can tolerate this many failures + numRequired -= r.cfg.ReplicationFactor / 2 + + if len(healthyInstances) < numRequired { + return ReplicationSet{}, ErrTooManyFailedIngesters + } + + maxErrors = len(healthyInstances) - numRequired } return ReplicationSet{ - Ingesters: ingesters, - MaxErrors: len(ingesters) - numRequired, + Ingesters: healthyInstances, + MaxErrors: maxErrors, + MaxUnavailableZones: maxUnavailableZones, }, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go index c234d05b9ceb2..6f28988eedaec 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go @@ -82,6 +82,40 @@ func WaitInstanceState(ctx context.Context, r *Ring, instanceID string, state In return backoff.Err() } +// WaitRingStability monitors the ring topology for the provided operation and waits until it +// keeps stable for at least minStability. +func WaitRingStability(ctx context.Context, r *Ring, op Operation, minStability, maxWaiting time.Duration) error { + // Configure the max waiting time as a context deadline. + ctx, cancel := context.WithTimeout(ctx, maxWaiting) + defer cancel() + + // Get the initial ring state. 
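
A worked example of the zone arithmetic above, written as a self-contained mirror rather than a call into the vendored code: with RF=3 and three zones, the zone quorum is 2 and one zone may be unavailable; once a zone contains a failing instance the whole zone is dropped and the remaining budget shrinks accordingly.

package sketch

// maxUnavailableZones mirrors GetReplicationSetForOperation: replication spans
// min(numZones, replicationFactor) zones, a quorum of those zones must succeed,
// and the rest may fail.
func maxUnavailableZones(numZones, replicationFactor int) int {
	numReplicatedZones := numZones
	if replicationFactor < numReplicatedZones {
		numReplicatedZones = replicationFactor
	}
	minSuccessZones := numReplicatedZones/2 + 1
	return minSuccessZones - 1 // e.g. maxUnavailableZones(3, 3) == 1
}
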
+ ringLastState, _ := r.GetAllHealthy(op) // nolint:errcheck + ringLastStateTs := time.Now() + + const pollingFrequency = time.Second + pollingTicker := time.NewTicker(pollingFrequency) + defer pollingTicker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-pollingTicker.C: + // We ignore the error because in case of error it will return an empty + // replication set which we use to compare with the previous state. + currRingState, _ := r.GetAllHealthy(op) // nolint:errcheck + + if HasReplicationSetChanged(ringLastState, currRingState) { + ringLastState = currRingState + ringLastStateTs = time.Now() + } else if time.Since(ringLastStateTs) >= minStability { + return nil + } + } + } +} + // getZones return the list zones from the provided tokens. The returned list // is guaranteed to be sorted. func getZones(tokens map[string][]TokenDesc) []string { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go index ffa6e92836595..ac3d959b8c02a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go @@ -23,6 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ruler/rules" store "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" ) @@ -134,7 +135,7 @@ func NewAPI(r *Ruler, s rules.RuleStore) *API { func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { logger := util.WithContext(req.Context(), util.Logger) - userID, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) + userID, err := tenant.TenantID(req.Context()) if err != nil || userID == "" { level.Error(logger).Log("msg", "error extracting org id from context", "err", err) respondError(logger, w, "no valid org id found") @@ -142,7 +143,7 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { } w.Header().Set("Content-Type", "application/json") - rgs, err := a.ruler.GetRules(ctx) + rgs, err := a.ruler.GetRules(req.Context()) if err != nil { respondError(logger, w, err.Error()) @@ -226,7 +227,7 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { func (a *API) PrometheusAlerts(w http.ResponseWriter, req *http.Request) { logger := util.WithContext(req.Context(), util.Logger) - userID, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) + userID, err := tenant.TenantID(req.Context()) if err != nil || userID == "" { level.Error(logger).Log("msg", "error extracting org id from context", "err", err) respondError(logger, w, "no valid org id found") @@ -234,7 +235,7 @@ func (a *API) PrometheusAlerts(w http.ResponseWriter, req *http.Request) { } w.Header().Set("Content-Type", "application/json") - rgs, err := a.ruler.GetRules(ctx) + rgs, err := a.ruler.GetRules(req.Context()) if err != nil { respondError(logger, w, err.Error()) @@ -355,7 +356,7 @@ func parseGroupName(params map[string]string) (string, error) { // and returns them in that order. It also allows users to require a namespace or group name and return // an error if it they can not be parsed. 
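
A usage sketch for WaitRingStability (the ring value, operation and durations are illustrative): a component such as the compactor can block until the healthy topology has stopped changing before it starts assigning work.

package sketch

import (
	"context"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
)

func waitForStableRing(ctx context.Context, r *ring.Ring) error {
	// Require one minute of unchanged topology, give up after five minutes.
	return ring.WaitRingStability(ctx, r, ring.Compactor, time.Minute, 5*time.Minute)
}
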
func parseRequest(req *http.Request, requireNamespace, requireGroup bool) (string, string, string, error) { - userID, err := user.ExtractOrgID(req.Context()) + userID, err := tenant.TenantID(req.Context()) if err != nil { return "", "", "", user.ErrNoOrgID } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go index ec7dd113fdd8c..5057215e0140d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go @@ -109,7 +109,7 @@ func (r *DefaultMultiTenantManager) SyncRuleGroups(ctx context.Context, ruleGrou r.lastReloadSuccessful.DeleteLabelValues(userID) r.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID) r.configUpdatesTotal.DeleteLabelValues(userID) - r.userManagerMetrics.DeleteUserRegistry(userID) + r.userManagerMetrics.RemoveUserRegistry(userID) level.Info(r.logger).Log("msg", "deleting rule manager", "user", userID) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go index f4c3942229477..c14c966af3def 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go @@ -1,8 +1,6 @@ package ruler import ( - "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/util" @@ -11,9 +9,7 @@ import ( // ManagerMetrics aggregates metrics exported by the Prometheus // rules package and returns them as Cortex metrics type ManagerMetrics struct { - // Maps userID -> registry - regsMu sync.Mutex - regs map[string]*prometheus.Registry + regs *util.UserRegistries EvalDuration *prometheus.Desc IterationDuration *prometheus.Desc @@ -30,8 +26,7 @@ type ManagerMetrics struct { // NewManagerMetrics returns a ManagerMetrics struct func NewManagerMetrics() *ManagerMetrics { return &ManagerMetrics{ - regs: map[string]*prometheus.Registry{}, - regsMu: sync.Mutex{}, + regs: util.NewUserRegistries(), EvalDuration: prometheus.NewDesc( "cortex_prometheus_rule_evaluation_duration_seconds", @@ -96,33 +91,14 @@ func NewManagerMetrics() *ManagerMetrics { } } -// AddUserRegistry adds a Prometheus registry to the struct +// AddUserRegistry adds a user-specific Prometheus registry. func (m *ManagerMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { - m.regsMu.Lock() - defer m.regsMu.Unlock() - - m.regs[user] = reg -} - -// DeleteUserRegistry removes user-specific Prometheus registry. -func (m *ManagerMetrics) DeleteUserRegistry(user string) { - m.regsMu.Lock() - defer m.regsMu.Unlock() - - delete(m.regs, user) + m.regs.AddUserRegistry(user, reg) } -// Registries returns a map of prometheus registries managed by the struct -func (m *ManagerMetrics) Registries() map[string]*prometheus.Registry { - regs := map[string]*prometheus.Registry{} - - m.regsMu.Lock() - defer m.regsMu.Unlock() - for uid, r := range m.regs { - regs[uid] = r - } - - return regs +// RemoveUserRegistry removes user-specific Prometheus registry. 
+func (m *ManagerMetrics) RemoveUserRegistry(user string) { + m.regs.RemoveUserRegistry(user, true) } // Describe implements the Collector interface @@ -141,10 +117,10 @@ func (m *ManagerMetrics) Describe(out chan<- *prometheus.Desc) { // Collect implements the Collector interface func (m *ManagerMetrics) Collect(out chan<- prometheus.Metric) { - data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.Registries()) + data := m.regs.BuildMetricFamiliesPerUser() // WARNING: It is important that all metrics generated in this method are "Per User". - // Thanks to that we can actually *remove* metrics for given user (see DeleteUserRegistry). + // Thanks to that we can actually *remove* metrics for given user (see RemoveUserRegistry). // If same user is later re-added, all metrics will start from 0, which is fine. data.SendSumOfSummariesPerUser(out, m.EvalDuration, "prometheus_rule_evaluation_duration_seconds") diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go index 8768832936ed1..6d8c6ab5bfc8a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -30,10 +30,11 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ruler/rules" store "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/tls" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -62,8 +63,8 @@ const ( type Config struct { // This is used for template expansion in alerts; must be a valid URL. ExternalURL flagext.URLValue `yaml:"external_url"` - // TLS parameters for the GRPC Client - ClientTLSConfig tls.ClientConfig `yaml:"ruler_client"` + // GRPC Client configuration. + ClientTLSConfig grpcclient.ConfigWithTLS `yaml:"ruler_client"` // How frequently to evaluate rules by default. EvaluationInterval time.Duration `yaml:"evaluation_interval"` // Deprecated. Replaced with pkg/util/validation/Limits.RulerEvaluationDelay field. 
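
A sketch of the intended lifecycle (tenant ID and wiring are illustrative): one ManagerMetrics collector is registered once, each tenant's Prometheus rule manager gets its own registry via AddUserRegistry, and RemoveUserRegistry drops that tenant's series again when the manager is deleted.

package sketch

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/ruler"
)

func perTenantRuleMetrics() {
	m := ruler.NewManagerMetrics()
	prometheus.MustRegister(m) // aggregates all per-tenant registries

	reg := prometheus.NewRegistry() // handed to the tenant's rule manager
	m.AddUserRegistry("tenant-1", reg)

	// Later, when the tenant's rule manager is torn down:
	m.RemoveUserRegistry("tenant-1")
}
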
@@ -109,7 +110,7 @@ type Config struct { } // Validate config and returns error on failure -func (cfg *Config) Validate(limits validation.Limits) error { +func (cfg *Config) Validate(limits validation.Limits, log log.Logger) error { if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -121,6 +122,9 @@ func (cfg *Config) Validate(limits validation.Limits) error { if err := cfg.StoreConfig.Validate(); err != nil { return errors.Wrap(err, "invalid storage config") } + if err := cfg.ClientTLSConfig.Validate(log); err != nil { + return errors.Wrap(err, "invalid ruler gRPC client config") + } return nil } @@ -406,7 +410,7 @@ func (r *Ruler) run(ctx context.Context) error { var ringLastState ring.ReplicationSet if r.cfg.EnableSharding { - ringLastState, _ = r.ring.GetAll(ring.Ruler) + ringLastState, _ = r.ring.GetAllHealthy(ring.Ruler) ringTicker := time.NewTicker(util.DurationWithJitter(r.cfg.RingCheckPeriod, 0.2)) defer ringTicker.Stop() ringTickerChan = ringTicker.C @@ -422,7 +426,7 @@ func (r *Ruler) run(ctx context.Context) error { case <-ringTickerChan: // We ignore the error because in case of error it will return an empty // replication set which we use to compare with the previous state. - currRingState, _ := r.ring.GetAll(ring.Ruler) + currRingState, _ := r.ring.GetAllHealthy(ring.Ruler) if ring.HasReplicationSetChanged(ringLastState, currRingState) { ringLastState = currRingState @@ -584,7 +588,7 @@ func filterRuleGroups(userID string, ruleGroups []*store.RuleGroupDesc, ring rin // GetRules retrieves the running rules from this ruler and all running rulers in the ring if // sharding is enabled func (r *Ruler) GetRules(ctx context.Context) ([]*GroupStateDesc, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id found in context") } @@ -684,7 +688,7 @@ func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) { } func (r *Ruler) getShardedRules(ctx context.Context) ([]*GroupStateDesc, error) { - rulers, err := r.ring.GetAll(ring.Ruler) + rulers, err := r.ring.GetReplicationSetForOperation(ring.Ruler) if err != nil { return nil, err } @@ -694,14 +698,14 @@ func (r *Ruler) getShardedRules(ctx context.Context) ([]*GroupStateDesc, error) return nil, fmt.Errorf("unable to inject user ID into grpc request, %v", err) } - rgs := []*GroupStateDesc{} + var rgs []*GroupStateDesc for _, rlr := range rulers.Ingesters { - dialOpts, err := r.cfg.ClientTLSConfig.GetGRPCDialOptions() + dialOpts, err := r.cfg.ClientTLSConfig.DialOption(nil, nil) if err != nil { return nil, err } - conn, err := grpc.Dial(rlr.Addr, dialOpts...) + conn, err := grpc.DialContext(ctx, rlr.Addr, dialOpts...) 
if err != nil { return nil, err } @@ -724,7 +728,7 @@ func (r *Ruler) getShardedRules(ctx context.Context) ([]*GroupStateDesc, error) // Rules implements the rules service func (r *Ruler) Rules(ctx context.Context, in *RulesRequest) (*RulesResponse, error) { - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id found in context") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go index 93941b8c7e04f..67db4d9832d69 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go @@ -54,6 +54,9 @@ func (cfg *RuleStoreConfig) Validate() error { if err := cfg.Azure.Validate(); err != nil { return errors.Wrap(err, "invalid Azure Storage config") } + if err := cfg.S3.Validate(); err != nil { + return errors.Wrap(err, "invalid S3 Storage config") + } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go new file mode 100644 index 0000000000000..342e2a26d8a76 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/queue.go @@ -0,0 +1,189 @@ +package queue + +import ( + "context" + "sync" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" +) + +var ( + ErrTooManyRequests = errors.New("too many outstanding requests") + ErrStopped = errors.New("queue is stopped") +) + +// UserIndex is opaque type that allows to resume iteration over users between successive calls +// of RequestQueue.GetNextRequestForQuerier method. +type UserIndex struct { + last int +} + +// Modify index to start iteration on the same user, for which last queue was returned. +func (ui UserIndex) ReuseLastUser() UserIndex { + if ui.last >= 0 { + return UserIndex{last: ui.last - 1} + } + return ui +} + +// FirstUser returns UserIndex that starts iteration over user queues from the very first user. +func FirstUser() UserIndex { + return UserIndex{last: -1} +} + +// Request stored into the queue. +type Request interface{} + +// RequestQueue holds incoming requests in per-user queues. It also assigns each user specified number of queriers, +// and when querier asks for next request to handle (using GetNextRequestForQuerier), it returns requests +// in a fair fashion. +type RequestQueue struct { + connectedQuerierWorkers *atomic.Int32 + + mtx sync.Mutex + cond *sync.Cond // Notified when request is enqueued or dequeued, or querier is disconnected. + queues *queues + stopped bool + + queueLength *prometheus.GaugeVec // Per user. +} + +func NewRequestQueue(maxOutstandingPerTenant int, queueLength *prometheus.GaugeVec) *RequestQueue { + q := &RequestQueue{ + queues: newUserQueues(maxOutstandingPerTenant), + connectedQuerierWorkers: atomic.NewInt32(0), + queueLength: queueLength, + } + + q.cond = sync.NewCond(&q.mtx) + + return q +} + +// Puts the request into the queue. MaxQueries is user-specific value that specifies how many queriers can +// this user use (zero or negative = all queriers). It is passed to each EnqueueRequest, because it can change +// between calls. +// +// If request is successfully enqueued, successFn is called with the lock held, before any querier can receive the request. 
+func (q *RequestQueue) EnqueueRequest(userID string, req Request, maxQueriers int, successFn func()) error { + q.mtx.Lock() + defer q.mtx.Unlock() + + if q.stopped { + return ErrStopped + } + + queue := q.queues.getOrAddQueue(userID, maxQueriers) + if queue == nil { + // This can only happen if userID is "". + return errors.New("no queue found") + } + + select { + case queue <- req: + q.queueLength.WithLabelValues(userID).Inc() + q.cond.Broadcast() + // Call this function while holding a lock. This guarantees that no querier can fetch the request before function returns. + if successFn != nil { + successFn() + } + return nil + default: + return ErrTooManyRequests + } +} + +// GetNextRequestForQuerier find next user queue and takes the next request off of it. Will block if there are no requests. +// By passing user index from previous call of this method, querier guarantees that it iterates over all users fairly. +// If querier finds that request from the user is already expired, it can get a request for the same user by using UserIndex.ReuseLastUser. +func (q *RequestQueue) GetNextRequestForQuerier(ctx context.Context, last UserIndex, querierID string) (Request, UserIndex, error) { + q.mtx.Lock() + defer q.mtx.Unlock() + + querierWait := false + +FindQueue: + // We need to wait if there are no users, or no pending requests for given querier. + for (q.queues.len() == 0 || querierWait) && ctx.Err() == nil && !q.stopped { + querierWait = false + q.cond.Wait() + } + + if q.stopped { + return nil, last, ErrStopped + } + + if err := ctx.Err(); err != nil { + return nil, last, err + } + + for { + queue, userID, idx := q.queues.getNextQueueForQuerier(last.last, querierID) + last.last = idx + if queue == nil { + break + } + + // Pick next request from the queue. + for { + request := <-queue + if len(queue) == 0 { + q.queues.deleteQueue(userID) + } + + q.queueLength.WithLabelValues(userID).Dec() + + // Tell close() we've processed a request. + q.cond.Broadcast() + + return request, last, nil + } + } + + // There are no unexpired requests, so we can get back + // and wait for more requests. + querierWait = true + goto FindQueue +} + +func (q *RequestQueue) Stop() { + q.mtx.Lock() + defer q.mtx.Unlock() + + for q.queues.len() > 0 && q.connectedQuerierWorkers.Load() > 0 { + q.cond.Wait() + } + + // Only stop after dispatching enqueued requests. + q.stopped = true + + // If there are still goroutines in GetNextRequestForQuerier method, they get notified. + q.cond.Broadcast() +} + +func (q *RequestQueue) RegisterQuerierConnection(querier string) { + q.connectedQuerierWorkers.Inc() + + q.mtx.Lock() + defer q.mtx.Unlock() + q.queues.addQuerierConnection(querier) +} + +func (q *RequestQueue) UnregisterQuerierConnection(querier string) { + q.connectedQuerierWorkers.Dec() + + q.mtx.Lock() + defer q.mtx.Unlock() + q.queues.removeQuerierConnection(querier) +} + +// When querier is waiting for next request, this unblocks the method. 
+func (q *RequestQueue) QuerierDisconnecting() { + q.cond.Broadcast() +} + +func (q *RequestQueue) GetConnectedQuerierWorkersMetric() float64 { + return float64(q.connectedQuerierWorkers.Load()) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend_querier_queues.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go similarity index 97% rename from vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend_querier_queues.go rename to vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go index 23c1435d43793..223a7500ff4c9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend_querier_queues.go +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/queue/user_queues.go @@ -1,4 +1,4 @@ -package frontend +package queue import ( "math/rand" @@ -26,7 +26,7 @@ type queues struct { } type userQueue struct { - ch chan *request + ch chan Request // If not nil, only these queriers can handle user requests. If nil, all queriers can. // We set this to nil if number of available queriers <= maxQueriers. @@ -74,7 +74,7 @@ func (q *queues) deleteQueue(userID string) { // MaxQueriers is used to compute which queriers should handle requests for this user. // If maxQueriers is <= 0, all queriers can handle this user's requests. // If maxQueriers has changed since the last call, queriers for this are recomputed. -func (q *queues) getOrAddQueue(userID string, maxQueriers int) chan *request { +func (q *queues) getOrAddQueue(userID string, maxQueriers int) chan Request { // Empty user is not allowed, as that would break our users list ("" is used for free spot). if userID == "" { return nil @@ -88,7 +88,7 @@ func (q *queues) getOrAddQueue(userID string, maxQueriers int) chan *request { if uq == nil { uq = &userQueue{ - ch: make(chan *request, q.maxUserQueueSize), + ch: make(chan Request, q.maxUserQueueSize), seed: util.ShuffleShardSeed(userID, ""), index: -1, } @@ -121,7 +121,7 @@ func (q *queues) getOrAddQueue(userID string, maxQueriers int) chan *request { // Finds next queue for the querier. To support fair scheduling between users, client is expected // to pass last user index returned by this function as argument. Is there was no previous // last user index, use -1. 
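
A consumer-side sketch for the new queue package (identifiers and sizes are illustrative): the UserIndex returned by GetNextRequestForQuerier has to be fed back into the next call so tenants keep being iterated fairly, and ReuseLastUser lets a querier retry the same tenant after discarding an expired request.

package sketch

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/scheduler/queue"
)

func runQuerierLoop(ctx context.Context) error {
	queueLength := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "sketch_queue_length",
		Help: "Per-tenant queue length.",
	}, []string{"user"})
	q := queue.NewRequestQueue(100, queueLength)

	q.RegisterQuerierConnection("querier-1")
	defer q.UnregisterQuerierConnection("querier-1")

	last := queue.FirstUser()
	for {
		req, idx, err := q.GetNextRequestForQuerier(ctx, last, "querier-1")
		if err != nil {
			return err // queue stopped or context canceled
		}
		last = idx
		_ = req // handle the dequeued request here
	}
}
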
-func (q *queues) getNextQueueForQuerier(lastUserIndex int, querier string) (chan *request, string, int) { +func (q *queues) getNextQueueForQuerier(lastUserIndex int, querier string) (chan Request, string, int) { uid := lastUserIndex for iters := 0; iters < len(q.users); iters++ { diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go new file mode 100644 index 0000000000000..47aed9561e167 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go @@ -0,0 +1,460 @@ +package scheduler + +import ( + "context" + "errors" + "flag" + "io" + "net/http" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + otgrpc "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/middleware" + "github.com/weaveworks/common/user" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" + "github.com/cortexproject/cortex/pkg/scheduler/queue" + "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/grpcutil" + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + errSchedulerIsNotRunning = errors.New("scheduler is not running") +) + +// Scheduler is responsible for queueing and dispatching queries to Queriers. +type Scheduler struct { + services.Service + + cfg Config + log log.Logger + + limits Limits + + connectedFrontendsMu sync.Mutex + connectedFrontends map[string]*connectedFrontend + + requestQueue *queue.RequestQueue + + pendingRequestsMu sync.Mutex + pendingRequests map[requestKey]*schedulerRequest // Request is kept in this map even after being dispatched to querier. It can still be canceled at that time. + + // Metrics. + connectedQuerierClients prometheus.GaugeFunc + connectedFrontendClients prometheus.GaugeFunc + queueDuration prometheus.Histogram +} + +type requestKey struct { + frontendAddr string + queryID uint64 +} + +type connectedFrontend struct { + connections int + + // This context is used for running all queries from the same frontend. + // When last frontend connection is closed, context is canceled. + ctx context.Context + cancel context.CancelFunc +} + +type Config struct { + MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant"` + + GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config" doc:"description=This configures the gRPC client used to report errors back to the query-frontend."` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per query-scheduler. In-flight requests above this limit will fail with HTTP response status code 429.") + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-scheduler.grpc-client-config", f) +} + +// NewScheduler creates a new Scheduler. 
+func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Scheduler, error) { + queueLength := promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_query_scheduler_queue_length", + Help: "Number of queries in the queue.", + }, []string{"user"}) + + s := &Scheduler{ + cfg: cfg, + log: log, + limits: limits, + + requestQueue: queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, queueLength), + pendingRequests: map[requestKey]*schedulerRequest{}, + connectedFrontends: map[string]*connectedFrontend{}, + } + + s.queueDuration = promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_query_scheduler_queue_duration_seconds", + Help: "Time spend by requests in queue before getting picked up by a querier.", + Buckets: prometheus.DefBuckets, + }) + s.connectedQuerierClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_query_scheduler_connected_querier_clients", + Help: "Number of querier worker clients currently connected to the query-scheduler.", + }, s.requestQueue.GetConnectedQuerierWorkersMetric) + s.connectedFrontendClients = promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_query_scheduler_connected_frontend_clients", + Help: "Number of query-frontend worker clients currently connected to the query-scheduler.", + }, s.getConnectedFrontendClientsMetric) + + s.Service = services.NewIdleService(nil, s.stopping) + return s, nil +} + +// Limits needed for the Query Frontend - interface used for decoupling. +type Limits interface { + // Returns max queriers to use per tenant, or 0 if shuffle sharding is disabled. + MaxQueriersPerUser(user string) int +} + +type schedulerRequest struct { + frontendAddress string + userID string + queryID uint64 + request *httpgrpc.HTTPRequest + + enqueueTime time.Time + + ctx context.Context + ctxCancel context.CancelFunc + queueSpan opentracing.Span + + // This is only used for testing. + parentSpanContext opentracing.SpanContext +} + +// This method handles connection from frontend. +func (s *Scheduler) FrontendLoop(frontend schedulerpb.SchedulerForFrontend_FrontendLoopServer) error { + frontendAddress, frontendCtx, err := s.frontendConnected(frontend) + if err != nil { + return err + } + defer s.frontendDisconnected(frontendAddress) + + // Response to INIT. If scheduler is not running, we skip for-loop, send SHUTTING_DOWN and exit this method. + if s.State() == services.Running { + if err := frontend.Send(&schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK}); err != nil { + return err + } + } + + // We stop accepting new queries in Stopping state. By returning quickly, we disconnect frontends, which in turns + // cancels all their queries. + for s.State() == services.Running { + msg, err := frontend.Recv() + if err != nil { + // No need to report this as error, it is expected when query-frontend performs SendClose() (as frontendSchedulerWorker does). + if err == io.EOF { + return nil + } + return err + } + + if s.State() != services.Running { + break // break out of the loop, and send SHUTTING_DOWN message. 
+ } + + var resp *schedulerpb.SchedulerToFrontend + + switch msg.GetType() { + case schedulerpb.ENQUEUE: + err = s.enqueueRequest(frontendCtx, frontendAddress, msg) + switch { + case err == nil: + resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK} + case err == queue.ErrTooManyRequests: + resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT} + default: + resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.ERROR, Error: err.Error()} + } + + case schedulerpb.CANCEL: + s.cancelRequestAndRemoveFromPending(frontendAddress, msg.QueryID) + resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK} + + default: + level.Error(s.log).Log("msg", "unknown request type from frontend", "addr", frontendAddress, "type", msg.GetType()) + return errors.New("unknown request type") + } + + err = frontend.Send(resp) + // Failure to send response results in ending this connection. + if err != nil { + return err + } + } + + // Report shutdown back to frontend, so that it can retry with different scheduler. Also stop the frontend loop. + return frontend.Send(&schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN}) +} + +func (s *Scheduler) frontendConnected(frontend schedulerpb.SchedulerForFrontend_FrontendLoopServer) (string, context.Context, error) { + msg, err := frontend.Recv() + if err != nil { + return "", nil, err + } + if msg.Type != schedulerpb.INIT || msg.FrontendAddress == "" { + return "", nil, errors.New("no frontend address") + } + + s.connectedFrontendsMu.Lock() + defer s.connectedFrontendsMu.Unlock() + + cf := s.connectedFrontends[msg.FrontendAddress] + if cf == nil { + cf = &connectedFrontend{ + connections: 0, + } + cf.ctx, cf.cancel = context.WithCancel(context.Background()) + s.connectedFrontends[msg.FrontendAddress] = cf + } + + cf.connections++ + return msg.FrontendAddress, cf.ctx, nil +} + +func (s *Scheduler) frontendDisconnected(frontendAddress string) { + s.connectedFrontendsMu.Lock() + defer s.connectedFrontendsMu.Unlock() + + cf := s.connectedFrontends[frontendAddress] + cf.connections-- + if cf.connections == 0 { + delete(s.connectedFrontends, frontendAddress) + cf.cancel() + } +} + +func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr string, msg *schedulerpb.FrontendToScheduler) error { + // Create new context for this request, to support cancellation. + ctx, cancel := context.WithCancel(frontendContext) + shouldCancel := true + defer func() { + if shouldCancel { + cancel() + } + }() + + // Extract tracing information from headers in HTTP request. FrontendContext doesn't have the correct tracing + // information, since that is a long-running request. 
+ tracer := opentracing.GlobalTracer() + parentSpanContext, err := grpcutil.GetParentSpanForRequest(tracer, msg.HttpRequest) + if err != nil { + return err + } + + userID := msg.GetUserID() + + req := &schedulerRequest{ + frontendAddress: frontendAddr, + userID: msg.UserID, + queryID: msg.QueryID, + request: msg.HttpRequest, + } + + req.parentSpanContext = parentSpanContext + req.queueSpan, req.ctx = opentracing.StartSpanFromContextWithTracer(ctx, tracer, "queued", opentracing.ChildOf(parentSpanContext)) + req.enqueueTime = time.Now() + req.ctxCancel = cancel + + maxQueriers := s.limits.MaxQueriersPerUser(userID) + + return s.requestQueue.EnqueueRequest(userID, req, maxQueriers, func() { + shouldCancel = false + + s.pendingRequestsMu.Lock() + defer s.pendingRequestsMu.Unlock() + s.pendingRequests[requestKey{frontendAddr: frontendAddr, queryID: msg.QueryID}] = req + }) +} + +// This method doesn't do removal from the queue. +func (s *Scheduler) cancelRequestAndRemoveFromPending(frontendAddr string, queryID uint64) { + s.pendingRequestsMu.Lock() + defer s.pendingRequestsMu.Unlock() + + key := requestKey{frontendAddr: frontendAddr, queryID: queryID} + req := s.pendingRequests[key] + if req != nil { + req.ctxCancel() + } + delete(s.pendingRequests, key) +} + +// QuerierLoop is started by querier to receive queries from scheduler. +func (s *Scheduler) QuerierLoop(querier schedulerpb.SchedulerForQuerier_QuerierLoopServer) error { + resp, err := querier.Recv() + if err != nil { + return err + } + + querierID := resp.GetQuerierID() + + s.requestQueue.RegisterQuerierConnection(querierID) + defer s.requestQueue.UnregisterQuerierConnection(querierID) + + // If the downstream connection to querier is cancelled, + // we need to ping the condition variable to unblock getNextRequestForQuerier. + // Ideally we'd have ctx aware condition variables... + go func() { + <-querier.Context().Done() + s.requestQueue.QuerierDisconnecting() + }() + + lastUserIndex := queue.FirstUser() + + // In stopping state scheduler is not accepting new queries, but still dispatching queries in the queues. + for s.isRunningOrStopping() { + req, idx, err := s.requestQueue.GetNextRequestForQuerier(querier.Context(), lastUserIndex, querierID) + if err != nil { + return err + } + lastUserIndex = idx + + r := req.(*schedulerRequest) + + s.queueDuration.Observe(time.Since(r.enqueueTime).Seconds()) + r.queueSpan.Finish() + + /* + We want to dequeue the next unexpired request from the chosen tenant queue. + The chance of choosing a particular tenant for dequeueing is (1/active_tenants). + This is problematic under load, especially with other middleware enabled such as + querier.split-by-interval, where one request may fan out into many. + If expired requests aren't exhausted before checking another tenant, it would take + n_active_tenants * n_expired_requests_at_front_of_queue requests being processed + before an active request was handled for the tenant in question. + If this tenant meanwhile continued to queue requests, + it's possible that it's own queue would perpetually contain only expired requests. + */ + + if r.ctx.Err() != nil { + // Remove from pending requests. 
+ s.cancelRequestAndRemoveFromPending(r.frontendAddress, r.queryID) + + lastUserIndex = lastUserIndex.ReuseLastUser() + continue + } + + if err := s.forwardRequestToQuerier(querier, r); err != nil { + return err + } + } + + return errSchedulerIsNotRunning +} + +func (s *Scheduler) forwardRequestToQuerier(querier schedulerpb.SchedulerForQuerier_QuerierLoopServer, req *schedulerRequest) error { + // Make sure to cancel request at the end to cleanup resources. + defer s.cancelRequestAndRemoveFromPending(req.frontendAddress, req.queryID) + + // Handle the stream sending & receiving on a goroutine so we can + // monitoring the contexts in a select and cancel things appropriately. + errCh := make(chan error, 1) + go func() { + err := querier.Send(&schedulerpb.SchedulerToQuerier{ + UserID: req.userID, + QueryID: req.queryID, + FrontendAddress: req.frontendAddress, + HttpRequest: req.request, + }) + if err != nil { + errCh <- err + return + } + + _, err = querier.Recv() + errCh <- err + }() + + select { + case <-req.ctx.Done(): + // If the upstream request is cancelled (eg. frontend issued CANCEL or closed connection), + // we need to cancel the downstream req. Only way we can do that is to close the stream (by returning error here). + // Querier is expecting this semantics. + return req.ctx.Err() + + case err := <-errCh: + // Is there was an error handling this request due to network IO, + // then error out this upstream request _and_ stream. + + if err != nil { + s.forwardErrorToFrontend(req.ctx, req, err) + } + return err + } +} + +func (s *Scheduler) forwardErrorToFrontend(ctx context.Context, req *schedulerRequest, requestErr error) { + opts, err := s.cfg.GRPCClientConfig.DialOption([]grpc.UnaryClientInterceptor{ + otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), + middleware.ClientUserHeaderInterceptor}, + nil) + if err != nil { + level.Warn(s.log).Log("msg", "failed to create gRPC options for the connection to frontend to report error", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr) + return + } + + conn, err := grpc.DialContext(ctx, req.frontendAddress, opts...) + if err != nil { + level.Warn(s.log).Log("msg", "failed to create gRPC connection to frontend to report error", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr) + return + } + + defer func() { + _ = conn.Close() + }() + + client := frontendv2pb.NewFrontendForQuerierClient(conn) + + userCtx := user.InjectOrgID(ctx, req.userID) + _, err = client.QueryResult(userCtx, &frontendv2pb.QueryResultRequest{ + QueryID: req.queryID, + HttpResponse: &httpgrpc.HTTPResponse{ + Code: http.StatusInternalServerError, + Body: []byte(requestErr.Error()), + }, + }) + + if err != nil { + level.Warn(s.log).Log("msg", "failed to forward error to frontend", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr) + return + } +} + +func (s *Scheduler) isRunningOrStopping() bool { + st := s.State() + return st == services.Running || st == services.Stopping +} + +// Close the Scheduler. 
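
A sketch of the frontend side of the protocol handled above (values are illustrative): after an INIT frame carrying its own address, a query-frontend enqueues queries with ENQUEUE frames and the scheduler answers each with OK, TOO_MANY_REQUESTS_PER_TENANT or ERROR, while query results flow back separately from the querier via the FrontendForQuerier interface.

package sketch

import (
	"github.com/weaveworks/common/httpgrpc"

	"github.com/cortexproject/cortex/pkg/scheduler/schedulerpb"
)

// initMessage opens the FrontendLoop stream; the scheduler passes the address to
// queriers so they know where to send results for this frontend.
func initMessage(frontendAddr string) *schedulerpb.FrontendToScheduler {
	return &schedulerpb.FrontendToScheduler{
		Type:            schedulerpb.INIT,
		FrontendAddress: frontendAddr,
	}
}

// enqueueMessage submits one query; queryID only needs to be unique per frontend.
func enqueueMessage(queryID uint64, userID string, req *httpgrpc.HTTPRequest) *schedulerpb.FrontendToScheduler {
	return &schedulerpb.FrontendToScheduler{
		Type:        schedulerpb.ENQUEUE,
		QueryID:     queryID,
		UserID:      userID,
		HttpRequest: req,
	}
}
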
+func (s *Scheduler) stopping(_ error) error { + s.requestQueue.Stop() + return nil +} + +func (s *Scheduler) getConnectedFrontendClientsMetric() float64 { + s.connectedFrontendsMu.Lock() + defer s.connectedFrontendsMu.Unlock() + + count := 0 + for _, workers := range s.connectedFrontends { + count += workers.connections + } + + return float64(count) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go new file mode 100644 index 0000000000000..a2698e5749a9b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go @@ -0,0 +1,1809 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: scheduler.proto + +package schedulerpb + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + httpgrpc "github.com/weaveworks/common/httpgrpc" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FrontendToSchedulerType int32 + +const ( + INIT FrontendToSchedulerType = 0 + ENQUEUE FrontendToSchedulerType = 1 + CANCEL FrontendToSchedulerType = 2 +) + +var FrontendToSchedulerType_name = map[int32]string{ + 0: "INIT", + 1: "ENQUEUE", + 2: "CANCEL", +} + +var FrontendToSchedulerType_value = map[string]int32{ + "INIT": 0, + "ENQUEUE": 1, + "CANCEL": 2, +} + +func (FrontendToSchedulerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_2b3fc28395a6d9c5, []int{0} +} + +type SchedulerToFrontendStatus int32 + +const ( + OK SchedulerToFrontendStatus = 0 + TOO_MANY_REQUESTS_PER_TENANT SchedulerToFrontendStatus = 1 + ERROR SchedulerToFrontendStatus = 2 + SHUTTING_DOWN SchedulerToFrontendStatus = 3 +) + +var SchedulerToFrontendStatus_name = map[int32]string{ + 0: "OK", + 1: "TOO_MANY_REQUESTS_PER_TENANT", + 2: "ERROR", + 3: "SHUTTING_DOWN", +} + +var SchedulerToFrontendStatus_value = map[string]int32{ + "OK": 0, + "TOO_MANY_REQUESTS_PER_TENANT": 1, + "ERROR": 2, + "SHUTTING_DOWN": 3, +} + +func (SchedulerToFrontendStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_2b3fc28395a6d9c5, []int{1} +} + +// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. +// To signal that querier is ready to accept another request, querier sends empty message. 
+type QuerierToScheduler struct { + QuerierID string `protobuf:"bytes,1,opt,name=querierID,proto3" json:"querierID,omitempty"` +} + +func (m *QuerierToScheduler) Reset() { *m = QuerierToScheduler{} } +func (*QuerierToScheduler) ProtoMessage() {} +func (*QuerierToScheduler) Descriptor() ([]byte, []int) { + return fileDescriptor_2b3fc28395a6d9c5, []int{0} +} +func (m *QuerierToScheduler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuerierToScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuerierToScheduler.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuerierToScheduler) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuerierToScheduler.Merge(m, src) +} +func (m *QuerierToScheduler) XXX_Size() int { + return m.Size() +} +func (m *QuerierToScheduler) XXX_DiscardUnknown() { + xxx_messageInfo_QuerierToScheduler.DiscardUnknown(m) +} + +var xxx_messageInfo_QuerierToScheduler proto.InternalMessageInfo + +func (m *QuerierToScheduler) GetQuerierID() string { + if m != nil { + return m.QuerierID + } + return "" +} + +type SchedulerToQuerier struct { + // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), + // it identifies the query by using this ID. + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` + // Where should querier send HTTP Response to (using FrontendForQuerier interface). + FrontendAddress string `protobuf:"bytes,3,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` + // User who initiated the request. Needed to send reply back to frontend. 
+ UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` +} + +func (m *SchedulerToQuerier) Reset() { *m = SchedulerToQuerier{} } +func (*SchedulerToQuerier) ProtoMessage() {} +func (*SchedulerToQuerier) Descriptor() ([]byte, []int) { + return fileDescriptor_2b3fc28395a6d9c5, []int{1} +} +func (m *SchedulerToQuerier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SchedulerToQuerier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SchedulerToQuerier.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SchedulerToQuerier) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchedulerToQuerier.Merge(m, src) +} +func (m *SchedulerToQuerier) XXX_Size() int { + return m.Size() +} +func (m *SchedulerToQuerier) XXX_DiscardUnknown() { + xxx_messageInfo_SchedulerToQuerier.DiscardUnknown(m) +} + +var xxx_messageInfo_SchedulerToQuerier proto.InternalMessageInfo + +func (m *SchedulerToQuerier) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *SchedulerToQuerier) GetHttpRequest() *httpgrpc.HTTPRequest { + if m != nil { + return m.HttpRequest + } + return nil +} + +func (m *SchedulerToQuerier) GetFrontendAddress() string { + if m != nil { + return m.FrontendAddress + } + return "" +} + +func (m *SchedulerToQuerier) GetUserID() string { + if m != nil { + return m.UserID + } + return "" +} + +type FrontendToScheduler struct { + Type FrontendToSchedulerType `protobuf:"varint,1,opt,name=type,proto3,enum=schedulerpb.FrontendToSchedulerType" json:"type,omitempty"` + // Used by INIT message. Will be put into all requests passed to querier. + FrontendAddress string `protobuf:"bytes,2,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` + // Used by ENQUEUE and CANCEL. + // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. + QueryID uint64 `protobuf:"varint,3,opt,name=queryID,proto3" json:"queryID,omitempty"` + // Following are used by ENQUEUE only. 
+ UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` + HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,5,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` +} + +func (m *FrontendToScheduler) Reset() { *m = FrontendToScheduler{} } +func (*FrontendToScheduler) ProtoMessage() {} +func (*FrontendToScheduler) Descriptor() ([]byte, []int) { + return fileDescriptor_2b3fc28395a6d9c5, []int{2} +} +func (m *FrontendToScheduler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FrontendToScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FrontendToScheduler.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FrontendToScheduler) XXX_Merge(src proto.Message) { + xxx_messageInfo_FrontendToScheduler.Merge(m, src) +} +func (m *FrontendToScheduler) XXX_Size() int { + return m.Size() +} +func (m *FrontendToScheduler) XXX_DiscardUnknown() { + xxx_messageInfo_FrontendToScheduler.DiscardUnknown(m) +} + +var xxx_messageInfo_FrontendToScheduler proto.InternalMessageInfo + +func (m *FrontendToScheduler) GetType() FrontendToSchedulerType { + if m != nil { + return m.Type + } + return INIT +} + +func (m *FrontendToScheduler) GetFrontendAddress() string { + if m != nil { + return m.FrontendAddress + } + return "" +} + +func (m *FrontendToScheduler) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *FrontendToScheduler) GetUserID() string { + if m != nil { + return m.UserID + } + return "" +} + +func (m *FrontendToScheduler) GetHttpRequest() *httpgrpc.HTTPRequest { + if m != nil { + return m.HttpRequest + } + return nil +} + +type SchedulerToFrontend struct { + Status SchedulerToFrontendStatus `protobuf:"varint,1,opt,name=status,proto3,enum=schedulerpb.SchedulerToFrontendStatus" json:"status,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SchedulerToFrontend) Reset() { *m = SchedulerToFrontend{} } +func (*SchedulerToFrontend) ProtoMessage() {} +func (*SchedulerToFrontend) Descriptor() ([]byte, []int) { + return fileDescriptor_2b3fc28395a6d9c5, []int{3} +} +func (m *SchedulerToFrontend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SchedulerToFrontend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SchedulerToFrontend.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SchedulerToFrontend) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchedulerToFrontend.Merge(m, src) +} +func (m *SchedulerToFrontend) XXX_Size() int { + return m.Size() +} +func (m *SchedulerToFrontend) XXX_DiscardUnknown() { + xxx_messageInfo_SchedulerToFrontend.DiscardUnknown(m) +} + +var xxx_messageInfo_SchedulerToFrontend proto.InternalMessageInfo + +func (m *SchedulerToFrontend) GetStatus() SchedulerToFrontendStatus { + if m != nil { + return m.Status + } + return OK +} + +func (m *SchedulerToFrontend) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterEnum("schedulerpb.FrontendToSchedulerType", FrontendToSchedulerType_name, FrontendToSchedulerType_value) + proto.RegisterEnum("schedulerpb.SchedulerToFrontendStatus", SchedulerToFrontendStatus_name, 
SchedulerToFrontendStatus_value) + proto.RegisterType((*QuerierToScheduler)(nil), "schedulerpb.QuerierToScheduler") + proto.RegisterType((*SchedulerToQuerier)(nil), "schedulerpb.SchedulerToQuerier") + proto.RegisterType((*FrontendToScheduler)(nil), "schedulerpb.FrontendToScheduler") + proto.RegisterType((*SchedulerToFrontend)(nil), "schedulerpb.SchedulerToFrontend") +} + +func init() { proto.RegisterFile("scheduler.proto", fileDescriptor_2b3fc28395a6d9c5) } + +var fileDescriptor_2b3fc28395a6d9c5 = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x5d, 0x6f, 0x12, 0x41, + 0x14, 0xdd, 0xa1, 0x40, 0xe5, 0xa2, 0x76, 0x9d, 0x56, 0x45, 0xd2, 0x4c, 0x09, 0x31, 0x86, 0x34, + 0x11, 0x0c, 0x9a, 0xe8, 0x83, 0x31, 0xc1, 0x76, 0x6b, 0x89, 0x75, 0x29, 0xc3, 0x10, 0x3f, 0x5e, + 0x48, 0x81, 0x29, 0x34, 0x2d, 0xcc, 0x76, 0x76, 0xd7, 0x86, 0x37, 0x7f, 0x82, 0x3f, 0x43, 0xff, + 0x89, 0x8f, 0x3c, 0xf6, 0x51, 0x16, 0x1f, 0x7c, 0xec, 0x4f, 0x30, 0x1d, 0x96, 0x75, 0xa9, 0x90, + 0xfa, 0x76, 0xef, 0xdd, 0x73, 0x72, 0xce, 0x3d, 0x33, 0x3b, 0xb0, 0x62, 0xb7, 0xba, 0xbc, 0xed, + 0x9e, 0x70, 0x99, 0xb7, 0xa4, 0x70, 0x04, 0x4e, 0x06, 0x03, 0xab, 0x99, 0x7e, 0xdc, 0x39, 0x72, + 0xba, 0x6e, 0x33, 0xdf, 0x12, 0xbd, 0x42, 0x47, 0x74, 0x44, 0x41, 0x61, 0x9a, 0xee, 0xa1, 0xea, + 0x54, 0xa3, 0xaa, 0x09, 0x37, 0xfd, 0x2c, 0x04, 0x3f, 0xe3, 0x07, 0x9f, 0xf9, 0x99, 0x90, 0xc7, + 0x76, 0xa1, 0x25, 0x7a, 0x3d, 0xd1, 0x2f, 0x74, 0x1d, 0xc7, 0xea, 0x48, 0xab, 0x15, 0x14, 0x13, + 0x56, 0xb6, 0x08, 0xb8, 0xea, 0x72, 0x79, 0xc4, 0x25, 0x13, 0xb5, 0xa9, 0x38, 0x5e, 0x87, 0xc4, + 0xe9, 0x64, 0x5a, 0xde, 0x4e, 0xa1, 0x0c, 0xca, 0x25, 0xe8, 0xdf, 0x41, 0xf6, 0x3b, 0x02, 0x1c, + 0x60, 0x99, 0xf0, 0xf9, 0x38, 0x05, 0xcb, 0x97, 0x98, 0x81, 0x4f, 0x89, 0xd2, 0x69, 0x8b, 0x9f, + 0x43, 0xf2, 0x52, 0x96, 0xf2, 0x53, 0x97, 0xdb, 0x4e, 0x2a, 0x92, 0x41, 0xb9, 0x64, 0xf1, 0x6e, + 0x3e, 0xb0, 0xb2, 0xcb, 0xd8, 0xbe, 0xff, 0x91, 0x86, 0x91, 0x38, 0x07, 0x2b, 0x87, 0x52, 0xf4, + 0x1d, 0xde, 0x6f, 0x97, 0xda, 0x6d, 0xc9, 0x6d, 0x3b, 0xb5, 0xa4, 0xdc, 0x5c, 0x1d, 0xe3, 0x7b, + 0x10, 0x77, 0x6d, 0x65, 0x37, 0xaa, 0x00, 0x7e, 0x97, 0xfd, 0x85, 0x60, 0x75, 0xc7, 0xc7, 0x86, + 0x37, 0x7c, 0x01, 0x51, 0x67, 0x60, 0x71, 0xe5, 0xf4, 0x76, 0xf1, 0x61, 0x3e, 0x14, 0x7c, 0x7e, + 0x0e, 0x9e, 0x0d, 0x2c, 0x4e, 0x15, 0x63, 0x9e, 0xa7, 0xc8, 0x7c, 0x4f, 0xa1, 0x40, 0x96, 0x66, + 0x03, 0x59, 0xe0, 0xf6, 0x6a, 0x50, 0xb1, 0xff, 0x0d, 0x2a, 0x7b, 0x0c, 0xab, 0xa1, 0x13, 0x99, + 0x2e, 0x80, 0x5f, 0x41, 0xdc, 0x76, 0x0e, 0x1c, 0xd7, 0xf6, 0xf7, 0x7c, 0x34, 0xb3, 0xe7, 0x1c, + 0x46, 0x4d, 0xa1, 0xa9, 0xcf, 0xc2, 0x6b, 0x10, 0xe3, 0x52, 0x0a, 0xe9, 0x6f, 0x38, 0x69, 0x36, + 0x5f, 0xc2, 0xfd, 0x05, 0x11, 0xe1, 0x1b, 0x10, 0x2d, 0x9b, 0x65, 0xa6, 0x6b, 0x38, 0x09, 0xcb, + 0x86, 0x59, 0xad, 0x1b, 0x75, 0x43, 0x47, 0x18, 0x20, 0xbe, 0x55, 0x32, 0xb7, 0x8c, 0x3d, 0x3d, + 0xb2, 0xd9, 0x82, 0x07, 0x0b, 0x85, 0x71, 0x1c, 0x22, 0x95, 0xb7, 0xba, 0x86, 0x33, 0xb0, 0xce, + 0x2a, 0x95, 0xc6, 0xbb, 0x92, 0xf9, 0xb1, 0x41, 0x8d, 0x6a, 0xdd, 0xa8, 0xb1, 0x5a, 0x63, 0xdf, + 0xa0, 0x0d, 0x66, 0x98, 0x25, 0x93, 0xe9, 0x08, 0x27, 0x20, 0x66, 0x50, 0x5a, 0xa1, 0x7a, 0x04, + 0xdf, 0x81, 0x5b, 0xb5, 0xdd, 0x3a, 0x63, 0x65, 0xf3, 0x4d, 0x63, 0xbb, 0xf2, 0xde, 0xd4, 0x97, + 0x8a, 0x27, 0xa1, 0x3c, 0x76, 0x84, 0x9c, 0x5e, 0xd1, 0x3a, 0x24, 0xfd, 0x72, 0x4f, 0x08, 0x0b, + 0x6f, 0xcc, 0xc4, 0xf1, 0xef, 0x7f, 0x90, 0xde, 0x58, 0x94, 0x97, 0x8f, 0xcd, 0x6a, 0x39, 0xf4, + 0x04, 0x15, 0x2d, 0x58, 0x0b, 0xab, 0x05, 0xf1, 0x7f, 
0x80, 0x9b, 0xd3, 0x5a, 0xe9, 0x65, 0xae, + 0xbb, 0x66, 0xe9, 0xcc, 0x75, 0x07, 0x34, 0x51, 0x7c, 0x5d, 0x1a, 0x8e, 0x88, 0x76, 0x3e, 0x22, + 0xda, 0xc5, 0x88, 0xa0, 0x2f, 0x1e, 0x41, 0xdf, 0x3c, 0x82, 0x7e, 0x78, 0x04, 0x0d, 0x3d, 0x82, + 0x7e, 0x7a, 0x04, 0xfd, 0xf6, 0x88, 0x76, 0xe1, 0x11, 0xf4, 0x75, 0x4c, 0xb4, 0xe1, 0x98, 0x68, + 0xe7, 0x63, 0xa2, 0x7d, 0x0a, 0x3f, 0x2f, 0xcd, 0xb8, 0x7a, 0x00, 0x9e, 0xfe, 0x09, 0x00, 0x00, + 0xff, 0xff, 0x89, 0xbf, 0xda, 0x9a, 0x85, 0x04, 0x00, 0x00, +} + +func (x FrontendToSchedulerType) String() string { + s, ok := FrontendToSchedulerType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x SchedulerToFrontendStatus) String() string { + s, ok := SchedulerToFrontendStatus_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *QuerierToScheduler) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuerierToScheduler) + if !ok { + that2, ok := that.(QuerierToScheduler) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QuerierID != that1.QuerierID { + return false + } + return true +} +func (this *SchedulerToQuerier) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SchedulerToQuerier) + if !ok { + that2, ok := that.(SchedulerToQuerier) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if !this.HttpRequest.Equal(that1.HttpRequest) { + return false + } + if this.FrontendAddress != that1.FrontendAddress { + return false + } + if this.UserID != that1.UserID { + return false + } + return true +} +func (this *FrontendToScheduler) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FrontendToScheduler) + if !ok { + that2, ok := that.(FrontendToScheduler) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.FrontendAddress != that1.FrontendAddress { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if this.UserID != that1.UserID { + return false + } + if !this.HttpRequest.Equal(that1.HttpRequest) { + return false + } + return true +} +func (this *SchedulerToFrontend) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SchedulerToFrontend) + if !ok { + that2, ok := that.(SchedulerToFrontend) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Status != that1.Status { + return false + } + if this.Error != that1.Error { + return false + } + return true +} +func (this *QuerierToScheduler) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&schedulerpb.QuerierToScheduler{") + s = append(s, "QuerierID: "+fmt.Sprintf("%#v", this.QuerierID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SchedulerToQuerier) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&schedulerpb.SchedulerToQuerier{") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", 
this.QueryID)+",\n") + if this.HttpRequest != nil { + s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") + } + s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") + s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FrontendToScheduler) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&schedulerpb.FrontendToScheduler{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") + if this.HttpRequest != nil { + s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SchedulerToFrontend) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&schedulerpb.SchedulerToFrontend{") + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringScheduler(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SchedulerForQuerierClient is the client API for SchedulerForQuerier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SchedulerForQuerierClient interface { + // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for + // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // + // Long-running loop is used to detect broken connection between scheduler and querier. This is important + // for scheduler to keep a list of connected queriers up-to-date. + QuerierLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForQuerier_QuerierLoopClient, error) +} + +type schedulerForQuerierClient struct { + cc *grpc.ClientConn +} + +func NewSchedulerForQuerierClient(cc *grpc.ClientConn) SchedulerForQuerierClient { + return &schedulerForQuerierClient{cc} +} + +func (c *schedulerForQuerierClient) QuerierLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForQuerier_QuerierLoopClient, error) { + stream, err := c.cc.NewStream(ctx, &_SchedulerForQuerier_serviceDesc.Streams[0], "/schedulerpb.SchedulerForQuerier/QuerierLoop", opts...) 
+ if err != nil { + return nil, err + } + x := &schedulerForQuerierQuerierLoopClient{stream} + return x, nil +} + +type SchedulerForQuerier_QuerierLoopClient interface { + Send(*QuerierToScheduler) error + Recv() (*SchedulerToQuerier, error) + grpc.ClientStream +} + +type schedulerForQuerierQuerierLoopClient struct { + grpc.ClientStream +} + +func (x *schedulerForQuerierQuerierLoopClient) Send(m *QuerierToScheduler) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerForQuerierQuerierLoopClient) Recv() (*SchedulerToQuerier, error) { + m := new(SchedulerToQuerier) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SchedulerForQuerierServer is the server API for SchedulerForQuerier service. +type SchedulerForQuerierServer interface { + // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for + // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // + // Long-running loop is used to detect broken connection between scheduler and querier. This is important + // for scheduler to keep a list of connected queriers up-to-date. + QuerierLoop(SchedulerForQuerier_QuerierLoopServer) error +} + +// UnimplementedSchedulerForQuerierServer can be embedded to have forward compatible implementations. +type UnimplementedSchedulerForQuerierServer struct { +} + +func (*UnimplementedSchedulerForQuerierServer) QuerierLoop(srv SchedulerForQuerier_QuerierLoopServer) error { + return status.Errorf(codes.Unimplemented, "method QuerierLoop not implemented") +} + +func RegisterSchedulerForQuerierServer(s *grpc.Server, srv SchedulerForQuerierServer) { + s.RegisterService(&_SchedulerForQuerier_serviceDesc, srv) +} + +func _SchedulerForQuerier_QuerierLoop_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerForQuerierServer).QuerierLoop(&schedulerForQuerierQuerierLoopServer{stream}) +} + +type SchedulerForQuerier_QuerierLoopServer interface { + Send(*SchedulerToQuerier) error + Recv() (*QuerierToScheduler, error) + grpc.ServerStream +} + +type schedulerForQuerierQuerierLoopServer struct { + grpc.ServerStream +} + +func (x *schedulerForQuerierQuerierLoopServer) Send(m *SchedulerToQuerier) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerForQuerierQuerierLoopServer) Recv() (*QuerierToScheduler, error) { + m := new(QuerierToScheduler) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SchedulerForQuerier_serviceDesc = grpc.ServiceDesc{ + ServiceName: "schedulerpb.SchedulerForQuerier", + HandlerType: (*SchedulerForQuerierServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "QuerierLoop", + Handler: _SchedulerForQuerier_QuerierLoop_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "scheduler.proto", +} + +// SchedulerForFrontendClient is the client API for SchedulerForFrontend service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SchedulerForFrontendClient interface { + // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and + // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. 
+ // + // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both + // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. + FrontendLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForFrontend_FrontendLoopClient, error) +} + +type schedulerForFrontendClient struct { + cc *grpc.ClientConn +} + +func NewSchedulerForFrontendClient(cc *grpc.ClientConn) SchedulerForFrontendClient { + return &schedulerForFrontendClient{cc} +} + +func (c *schedulerForFrontendClient) FrontendLoop(ctx context.Context, opts ...grpc.CallOption) (SchedulerForFrontend_FrontendLoopClient, error) { + stream, err := c.cc.NewStream(ctx, &_SchedulerForFrontend_serviceDesc.Streams[0], "/schedulerpb.SchedulerForFrontend/FrontendLoop", opts...) + if err != nil { + return nil, err + } + x := &schedulerForFrontendFrontendLoopClient{stream} + return x, nil +} + +type SchedulerForFrontend_FrontendLoopClient interface { + Send(*FrontendToScheduler) error + Recv() (*SchedulerToFrontend, error) + grpc.ClientStream +} + +type schedulerForFrontendFrontendLoopClient struct { + grpc.ClientStream +} + +func (x *schedulerForFrontendFrontendLoopClient) Send(m *FrontendToScheduler) error { + return x.ClientStream.SendMsg(m) +} + +func (x *schedulerForFrontendFrontendLoopClient) Recv() (*SchedulerToFrontend, error) { + m := new(SchedulerToFrontend) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SchedulerForFrontendServer is the server API for SchedulerForFrontend service. +type SchedulerForFrontendServer interface { + // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and + // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. + // + // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both + // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. + FrontendLoop(SchedulerForFrontend_FrontendLoopServer) error +} + +// UnimplementedSchedulerForFrontendServer can be embedded to have forward compatible implementations. 
+type UnimplementedSchedulerForFrontendServer struct { +} + +func (*UnimplementedSchedulerForFrontendServer) FrontendLoop(srv SchedulerForFrontend_FrontendLoopServer) error { + return status.Errorf(codes.Unimplemented, "method FrontendLoop not implemented") +} + +func RegisterSchedulerForFrontendServer(s *grpc.Server, srv SchedulerForFrontendServer) { + s.RegisterService(&_SchedulerForFrontend_serviceDesc, srv) +} + +func _SchedulerForFrontend_FrontendLoop_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SchedulerForFrontendServer).FrontendLoop(&schedulerForFrontendFrontendLoopServer{stream}) +} + +type SchedulerForFrontend_FrontendLoopServer interface { + Send(*SchedulerToFrontend) error + Recv() (*FrontendToScheduler, error) + grpc.ServerStream +} + +type schedulerForFrontendFrontendLoopServer struct { + grpc.ServerStream +} + +func (x *schedulerForFrontendFrontendLoopServer) Send(m *SchedulerToFrontend) error { + return x.ServerStream.SendMsg(m) +} + +func (x *schedulerForFrontendFrontendLoopServer) Recv() (*FrontendToScheduler, error) { + m := new(FrontendToScheduler) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SchedulerForFrontend_serviceDesc = grpc.ServiceDesc{ + ServiceName: "schedulerpb.SchedulerForFrontend", + HandlerType: (*SchedulerForFrontendServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "FrontendLoop", + Handler: _SchedulerForFrontend_FrontendLoop_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "scheduler.proto", +} + +func (m *QuerierToScheduler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuerierToScheduler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuerierToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QuerierID) > 0 { + i -= len(m.QuerierID) + copy(dAtA[i:], m.QuerierID) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.QuerierID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchedulerToQuerier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchedulerToQuerier) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchedulerToQuerier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UserID) > 0 { + i -= len(m.UserID) + copy(dAtA[i:], m.UserID) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.UserID))) + i-- + dAtA[i] = 0x22 + } + if len(m.FrontendAddress) > 0 { + i -= len(m.FrontendAddress) + copy(dAtA[i:], m.FrontendAddress) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.FrontendAddress))) + i-- + dAtA[i] = 0x1a + } + if m.HttpRequest != nil { + { + size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintScheduler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.QueryID != 0 { + i = encodeVarintScheduler(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FrontendToScheduler) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FrontendToScheduler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FrontendToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HttpRequest != nil { + { + size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintScheduler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.UserID) > 0 { + i -= len(m.UserID) + copy(dAtA[i:], m.UserID) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.UserID))) + i-- + dAtA[i] = 0x22 + } + if m.QueryID != 0 { + i = encodeVarintScheduler(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x18 + } + if len(m.FrontendAddress) > 0 { + i -= len(m.FrontendAddress) + copy(dAtA[i:], m.FrontendAddress) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.FrontendAddress))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintScheduler(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SchedulerToFrontend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchedulerToFrontend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchedulerToFrontend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = encodeVarintScheduler(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintScheduler(dAtA []byte, offset int, v uint64) int { + offset -= sovScheduler(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QuerierToScheduler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.QuerierID) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } + return n +} + +func (m *SchedulerToQuerier) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryID != 0 { + n += 1 + sovScheduler(uint64(m.QueryID)) + } + if m.HttpRequest != nil { + l = m.HttpRequest.Size() + n += 1 + l + sovScheduler(uint64(l)) + } + l = len(m.FrontendAddress) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } + l = len(m.UserID) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } + return n +} + +func (m *FrontendToScheduler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovScheduler(uint64(m.Type)) + } + l = len(m.FrontendAddress) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } + if m.QueryID != 0 { + n += 1 + sovScheduler(uint64(m.QueryID)) + } + l = len(m.UserID) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } + if m.HttpRequest != nil { + l = m.HttpRequest.Size() + n += 1 + l + sovScheduler(uint64(l)) + } + return n +} + +func (m *SchedulerToFrontend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.Status != 0 { + n += 1 + sovScheduler(uint64(m.Status)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } + return n +} + +func sovScheduler(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozScheduler(x uint64) (n int) { + return sovScheduler(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *QuerierToScheduler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuerierToScheduler{`, + `QuerierID:` + fmt.Sprintf("%v", this.QuerierID) + `,`, + `}`, + }, "") + return s +} +func (this *SchedulerToQuerier) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SchedulerToQuerier{`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, + `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, + `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, + `}`, + }, "") + return s +} +func (this *FrontendToScheduler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FrontendToScheduler{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, + `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SchedulerToFrontend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SchedulerToFrontend{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func valueToStringScheduler(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *QuerierToScheduler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuerierToScheduler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuerierToScheduler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuerierID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QuerierID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipScheduler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchedulerToQuerier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchedulerToQuerier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchedulerToQuerier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpRequest == nil { + m.HttpRequest = &httpgrpc.HTTPRequest{} + } + if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FrontendAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipScheduler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FrontendToScheduler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FrontendToScheduler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FrontendToScheduler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= FrontendToSchedulerType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FrontendAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HttpRequest == nil { + m.HttpRequest = &httpgrpc.HTTPRequest{} + } + if err := m.HttpRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipScheduler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchedulerToFrontend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchedulerToFrontend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchedulerToFrontend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= SchedulerToFrontendStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipScheduler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipScheduler(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowScheduler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowScheduler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ 
+ if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowScheduler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthScheduler + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthScheduler + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowScheduler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipScheduler(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthScheduler + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthScheduler = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowScheduler = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto new file mode 100644 index 0000000000000..62fab0d408fc5 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package schedulerpb; + +option go_package = "schedulerpb"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/weaveworks/common/httpgrpc/httpgrpc.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Scheduler interface exposed to Queriers. +service SchedulerForQuerier { + // After calling this method, both Querier and Scheduler enter a loop, in which querier waits for + // "SchedulerToQuerier" messages containing HTTP requests and processes them. After processing the request, + // querier signals that it is ready to accept another one by sending empty QuerierToScheduler message. + // + // Long-running loop is used to detect broken connection between scheduler and querier. This is important + // for scheduler to keep a list of connected queriers up-to-date. + rpc QuerierLoop(stream QuerierToScheduler) returns (stream SchedulerToQuerier) { }; +} + +// Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. +// To signal that querier is ready to accept another request, querier sends empty message. +message QuerierToScheduler { + string querierID = 1; +} + +message SchedulerToQuerier { + // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), + // it identifies the query by using this ID. + uint64 queryID = 1; + httpgrpc.HTTPRequest httpRequest = 2; + + // Where should querier send HTTP Response to (using FrontendForQuerier interface). + string frontendAddress = 3; + + // User who initiated the request. Needed to send reply back to frontend. 
+ string userID = 4; +} + +// Scheduler interface exposed to Frontend. Frontend can enqueue and cancel requests. +service SchedulerForFrontend { + // After calling this method, both Frontend and Scheduler enter a loop. Frontend will keep sending ENQUEUE and + // CANCEL requests, and scheduler is expected to process them. Scheduler returns one response for each request. + // + // Long-running loop is used to detect broken connection between frontend and scheduler. This is important for both + // parties... if connection breaks, frontend can cancel (and possibly retry on different scheduler) all pending + // requests sent to this scheduler, while scheduler can cancel queued requests from given frontend. + rpc FrontendLoop(stream FrontendToScheduler) returns (stream SchedulerToFrontend) { }; +} + +enum FrontendToSchedulerType { + INIT = 0; + ENQUEUE = 1; + CANCEL = 2; +} + +message FrontendToScheduler { + FrontendToSchedulerType type = 1; + + // Used by INIT message. Will be put into all requests passed to querier. + string frontendAddress = 2; + + // Used by ENQUEUE and CANCEL. + // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. + uint64 queryID = 3; + + // Following are used by ENQUEUE only. + string userID = 4; + httpgrpc.HTTPRequest httpRequest = 5; +} + +enum SchedulerToFrontendStatus { + OK = 0; + TOO_MANY_REQUESTS_PER_TENANT = 1; + ERROR = 2; + SHUTTING_DOWN = 3; +} + +message SchedulerToFrontend { + SchedulerToFrontendStatus status = 1; + string error = 2; +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/bucket_client.go similarity index 100% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/bucket_client.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/bucket_client.go diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/config.go similarity index 86% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/config.go index 547c70075abf6..3162d5f76948d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/config.go @@ -15,12 +15,12 @@ type Config struct { MaxRetries int `yaml:"max_retries"` } -// RegisterFlags registers the flags for TSDB Azure storage +// RegisterFlags registers the flags for Azure storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("", f) } -// RegisterFlagsWithPrefix registers the flags for TSDB Azure storage +// RegisterFlagsWithPrefix registers the flags for Azure storage func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.StorageAccountName, prefix+"azure.account-name", "", "Azure storage account name") f.Var(&cfg.StorageAccountKey, prefix+"azure.account-key", "Azure storage account key") diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go new file mode 100644 index 0000000000000..6d6805bb10a96 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go @@ -0,0 +1,132 @@ +package bucket + 
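For reference, the querier side of the QuerierLoop protocol defined in scheduler.proto above can be exercised roughly as sketched below. This is an illustrative sketch only, not part of the patch or of the vendored files: the scheduler address, the querier ID, and the handleRequest helper are placeholders; only the generated schedulerpb client API shown above is assumed.

package main

import (
	"context"
	"log"

	"github.com/cortexproject/cortex/pkg/scheduler/schedulerpb"
	"github.com/weaveworks/common/httpgrpc"
	"google.golang.org/grpc"
)

// handleRequest is a placeholder for the querier's actual query execution.
func handleRequest(_ *httpgrpc.HTTPRequest) {}

func main() {
	// Placeholder address; in practice this comes from the querier's scheduler address setting.
	conn, err := grpc.Dial("scheduler:9095", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := schedulerpb.NewSchedulerForQuerierClient(conn)
	loop, err := client.QuerierLoop(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The first message reports this querier's ID to the scheduler.
	if err := loop.Send(&schedulerpb.QuerierToScheduler{QuerierID: "querier-1"}); err != nil {
		log.Fatal(err)
	}

	for {
		// Wait for the next query assigned by the scheduler.
		msg, err := loop.Recv()
		if err != nil {
			log.Fatal(err)
		}
		handleRequest(msg.HttpRequest)

		// An empty QuerierToScheduler message signals readiness for the next query.
		// (A real querier worker would first send the result to msg.FrontendAddress.)
		if err := loop.Send(&schedulerpb.QuerierToScheduler{}); err != nil {
			log.Fatal(err)
		}
	}
}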
+import ( + "context" + "errors" + "flag" + "fmt" + "strings" + + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/bucket/azure" + "github.com/cortexproject/cortex/pkg/storage/bucket/filesystem" + "github.com/cortexproject/cortex/pkg/storage/bucket/gcs" + "github.com/cortexproject/cortex/pkg/storage/bucket/s3" + "github.com/cortexproject/cortex/pkg/storage/bucket/swift" + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // S3 is the value for the S3 storage backend. + S3 = "s3" + + // GCS is the value for the GCS storage backend. + GCS = "gcs" + + // Azure is the value for the Azure storage backend. + Azure = "azure" + + // Swift is the value for the Openstack Swift storage backend. + Swift = "swift" + + // Filesystem is the value for the filesystem storage backend. + Filesystem = "filesystem" +) + +var ( + supportedBackends = []string{S3, GCS, Azure, Swift, Filesystem} + + ErrUnsupportedStorageBackend = errors.New("unsupported storage backend") +) + +// Config holds configuration for accessing long-term storage. +type Config struct { + Backend string `yaml:"backend"` + // Backends + S3 s3.Config `yaml:"s3"` + GCS gcs.Config `yaml:"gcs"` + Azure azure.Config `yaml:"azure"` + Swift swift.Config `yaml:"swift"` + Filesystem filesystem.Config `yaml:"filesystem"` + + // Not used internally, meant to allow callers to wrap Buckets + // created using this config + Middlewares []func(objstore.Bucket) (objstore.Bucket, error) `yaml:"-"` +} + +// RegisterFlags registers the backend storage config. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.S3.RegisterFlagsWithPrefix(prefix, f) + cfg.GCS.RegisterFlagsWithPrefix(prefix, f) + cfg.Azure.RegisterFlagsWithPrefix(prefix, f) + cfg.Swift.RegisterFlagsWithPrefix(prefix, f) + cfg.Filesystem.RegisterFlagsWithPrefix(prefix, f) + + f.StringVar(&cfg.Backend, prefix+"backend", "s3", fmt.Sprintf("Backend storage to use. 
Supported backends are: %s.", strings.Join(supportedBackends, ", "))) +} + +func (cfg *Config) Validate() error { + if !util.StringsContain(supportedBackends, cfg.Backend) { + return ErrUnsupportedStorageBackend + } + + if cfg.Backend == S3 { + if err := cfg.S3.Validate(); err != nil { + return err + } + } + + return nil +} + +// NewClient creates a new bucket client based on the configured backend +func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) { + switch cfg.Backend { + case S3: + client, err = s3.NewBucketClient(cfg.S3, name, logger) + case GCS: + client, err = gcs.NewBucketClient(ctx, cfg.GCS, name, logger) + case Azure: + client, err = azure.NewBucketClient(cfg.Azure, name, logger) + case Swift: + client, err = swift.NewBucketClient(cfg.Swift, name, logger) + case Filesystem: + client, err = filesystem.NewBucketClient(cfg.Filesystem) + default: + return nil, ErrUnsupportedStorageBackend + } + + if err != nil { + return nil, err + } + + client = objstore.NewTracingBucket(bucketWithMetrics(client, name, reg)) + + // Wrap the client with any provided middleware + for _, wrap := range cfg.Middlewares { + client, err = wrap(client) + if err != nil { + return nil, err + } + } + + return client, nil +} + +func bucketWithMetrics(bucketClient objstore.Bucket, name string, reg prometheus.Registerer) objstore.Bucket { + if reg == nil { + return bucketClient + } + + return objstore.BucketWithMetrics( + "", // bucket label value + bucketClient, + prometheus.WrapRegistererWith(prometheus.Labels{"component": name}, reg)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client_mock.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go similarity index 66% rename from vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client_mock.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go index e4d01481098ee..c09fe92f77e15 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client_mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go @@ -1,4 +1,4 @@ -package tsdb +package bucket import ( "bytes" @@ -13,42 +13,42 @@ import ( var errObjectDoesNotExist = errors.New("object does not exist") -// BucketClientMock mocks objstore.Bucket -type BucketClientMock struct { +// ClientMock mocks objstore.Bucket +type ClientMock struct { mock.Mock } // Upload mocks objstore.Bucket.Upload() -func (m *BucketClientMock) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader) error { args := m.Called(ctx, name, r) return args.Error(0) } // Delete mocks objstore.Bucket.Delete() -func (m *BucketClientMock) Delete(ctx context.Context, name string) error { +func (m *ClientMock) Delete(ctx context.Context, name string) error { args := m.Called(ctx, name) return args.Error(0) } // Name mocks objstore.Bucket.Name() -func (m *BucketClientMock) Name() string { +func (m *ClientMock) Name() string { return "mock" } // Iter mocks objstore.Bucket.Iter() -func (m *BucketClientMock) Iter(ctx context.Context, dir string, f func(string) error) error { +func (m *ClientMock) Iter(ctx context.Context, dir string, f func(string) error) error { args := m.Called(ctx, dir, f) return args.Error(0) } // MockIter is a convenient method to mock Iter() -func (m *BucketClientMock) MockIter(prefix string, objects []string, err 
error) { +func (m *ClientMock) MockIter(prefix string, objects []string, err error) { m.MockIterWithCallback(prefix, objects, err, nil) } // MockIterWithCallback is a convenient method to mock Iter() and get a callback called when the Iter // API is called. -func (m *BucketClientMock) MockIterWithCallback(prefix string, objects []string, err error, cb func()) { +func (m *ClientMock) MockIterWithCallback(prefix string, objects []string, err error, cb func()) { m.On("Iter", mock.Anything, prefix, mock.Anything).Return(err).Run(func(args mock.Arguments) { if cb != nil { cb() @@ -65,7 +65,7 @@ func (m *BucketClientMock) MockIterWithCallback(prefix string, objects []string, } // Get mocks objstore.Bucket.Get() -func (m *BucketClientMock) Get(ctx context.Context, name string) (io.ReadCloser, error) { +func (m *ClientMock) Get(ctx context.Context, name string) (io.ReadCloser, error) { args := m.Called(ctx, name) val, err := args.Get(0), args.Error(1) if val == nil { @@ -75,7 +75,7 @@ func (m *BucketClientMock) Get(ctx context.Context, name string) (io.ReadCloser, } // MockGet is a convenient method to mock Get() and Exists() -func (m *BucketClientMock) MockGet(name, content string, err error) { +func (m *ClientMock) MockGet(name, content string, err error) { if content != "" { m.On("Exists", mock.Anything, name).Return(true, err) @@ -92,34 +92,38 @@ func (m *BucketClientMock) MockGet(name, content string, err error) { } } -func (m *BucketClientMock) MockDelete(name string, err error) { +func (m *ClientMock) MockDelete(name string, err error) { m.On("Delete", mock.Anything, name).Return(err) } +func (m *ClientMock) MockExists(name string, exists bool, err error) { + m.On("Exists", mock.Anything, name).Return(exists, err) +} + // GetRange mocks objstore.Bucket.GetRange() -func (m *BucketClientMock) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { +func (m *ClientMock) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { args := m.Called(ctx, name, off, length) return args.Get(0).(io.ReadCloser), args.Error(1) } // Exists mocks objstore.Bucket.Exists() -func (m *BucketClientMock) Exists(ctx context.Context, name string) (bool, error) { +func (m *ClientMock) Exists(ctx context.Context, name string) (bool, error) { args := m.Called(ctx, name) return args.Bool(0), args.Error(1) } // IsObjNotFoundErr mocks objstore.Bucket.IsObjNotFoundErr() -func (m *BucketClientMock) IsObjNotFoundErr(err error) bool { +func (m *ClientMock) IsObjNotFoundErr(err error) bool { return err == errObjectDoesNotExist } // ObjectSize mocks objstore.Bucket.Attributes() -func (m *BucketClientMock) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { +func (m *ClientMock) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { args := m.Called(ctx, name) return args.Get(0).(objstore.ObjectAttributes), args.Error(1) } // Close mocks objstore.Bucket.Close() -func (m *BucketClientMock) Close() error { +func (m *ClientMock) Close() error { return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/filesystem/bucket_client.go similarity index 100% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/bucket_client.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/filesystem/bucket_client.go diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/filesystem/config.go similarity index 65% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/filesystem/config.go index 26a3ebd1c45c6..923923a032906 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/filesystem/config.go @@ -7,12 +7,12 @@ type Config struct { Directory string `yaml:"dir"` } -// RegisterFlags registers the flags for TSDB filesystem storage +// RegisterFlags registers the flags for filesystem storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("", f) } -// RegisterFlagsWithPrefix registers the flags for TSDB filesystem storage with the provided prefix +// RegisterFlagsWithPrefix registers the flags for filesystem storage with the provided prefix func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.Directory, prefix+"filesystem.dir", "", "Local filesystem storage directory.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/bucket_client.go similarity index 100% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/bucket_client.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/bucket_client.go diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/config.go similarity index 78% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/config.go index 44eb020b12796..3e646a757c47a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/config.go @@ -12,12 +12,12 @@ type Config struct { ServiceAccount flagext.Secret `yaml:"service_account"` } -// RegisterFlags registers the flags for TSDB GCS storage +// RegisterFlags registers the flags for GCS storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("", f) } -// RegisterFlagsWithPrefix registers the flags for TSDB GCS storage with the provided prefix +// RegisterFlagsWithPrefix registers the flags for GCS storage with the provided prefix func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.BucketName, prefix+"gcs.bucket-name", "", "GCS bucket name") f.Var(&cfg.ServiceAccount, prefix+"gcs.service-account", "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. 
If empty, fallback to Google default logic.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go similarity index 90% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/bucket_client.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go index 5d3d734885712..51dab86036f7e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go @@ -30,5 +30,7 @@ func newS3Config(cfg Config) s3.Config { InsecureSkipVerify: cfg.HTTP.InsecureSkipVerify, Transport: cfg.HTTP.Transport, }, + // Enforce signature version 2 if CLI flag is set + SignatureV2: cfg.SignatureVersion == SignatureVersionV2, } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go similarity index 60% rename from vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go index f4e9874708b4c..96db7e1f0c125 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go @@ -1,13 +1,27 @@ package s3 import ( + "errors" "flag" + "fmt" "net/http" + "strings" "time" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) +const ( + SignatureVersionV4 = "v4" + SignatureVersionV2 = "v2" +) + +var ( + supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2} + errUnsupportedSignatureVersion = errors.New("unsupported signature version") +) + // HTTPConfig stores the http.Transport configuration for the s3 minio client. 
type HTTPConfig struct { IdleConnTimeout time.Duration `yaml:"idle_conn_timeout"` @@ -18,7 +32,7 @@ type HTTPConfig struct { Transport http.RoundTripper `yaml:"-"` } -// RegisterFlagsWithPrefix registers the flags for TSDB s3 storage with the provided prefix +// RegisterFlagsWithPrefix registers the flags for s3 storage with the provided prefix func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.DurationVar(&cfg.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.") f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.") @@ -27,26 +41,36 @@ func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { // Config holds the config options for an S3 backend type Config struct { - Endpoint string `yaml:"endpoint"` - BucketName string `yaml:"bucket_name"` - SecretAccessKey flagext.Secret `yaml:"secret_access_key"` - AccessKeyID string `yaml:"access_key_id"` - Insecure bool `yaml:"insecure"` + Endpoint string `yaml:"endpoint"` + BucketName string `yaml:"bucket_name"` + SecretAccessKey flagext.Secret `yaml:"secret_access_key"` + AccessKeyID string `yaml:"access_key_id"` + Insecure bool `yaml:"insecure"` + SignatureVersion string `yaml:"signature_version"` HTTP HTTPConfig `yaml:"http"` } -// RegisterFlags registers the flags for TSDB s3 storage with the provided prefix +// RegisterFlags registers the flags for s3 storage with the provided prefix func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("blocks-storage.", f) + cfg.RegisterFlagsWithPrefix("", f) } -// RegisterFlagsWithPrefix registers the flags for TSDB s3 storage with the provided prefix +// RegisterFlagsWithPrefix registers the flags for s3 storage with the provided prefix func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "S3 access key ID") f.Var(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "S3 secret access key") f.StringVar(&cfg.BucketName, prefix+"s3.bucket-name", "", "S3 bucket name") f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.") f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.") + f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. 
Supported values are: %s.", strings.Join(supportedSignatureVersions, ", "))) cfg.HTTP.RegisterFlagsWithPrefix(prefix, f) } + +// Validate config and returns error on failure +func (cfg *Config) Validate() error { + if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) { + return errUnsupportedSignatureVersion + } + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go new file mode 100644 index 0000000000000..179647dd4fd44 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go @@ -0,0 +1,37 @@ +package swift + +import ( + "github.com/go-kit/kit/log" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/objstore/swift" + yaml "gopkg.in/yaml.v2" +) + +// NewBucketClient creates a new Swift bucket client +func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { + bucketConfig := swift.SwiftConfig{ + AuthUrl: cfg.AuthURL, + Username: cfg.Username, + UserDomainName: cfg.UserDomainName, + UserDomainID: cfg.UserDomainID, + UserId: cfg.UserID, + Password: cfg.Password, + DomainId: cfg.DomainID, + DomainName: cfg.DomainName, + ProjectID: cfg.ProjectID, + ProjectName: cfg.ProjectName, + ProjectDomainID: cfg.ProjectDomainID, + ProjectDomainName: cfg.ProjectDomainName, + RegionName: cfg.RegionName, + ContainerName: cfg.ContainerName, + } + + // Thanos currently doesn't support passing the config as is, but expects a YAML, + // so we're going to serialize it. + serialized, err := yaml.Marshal(bucketConfig) + if err != nil { + return nil, err + } + + return swift.NewContainer(logger, serialized) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go new file mode 100644 index 0000000000000..3bc682af7edc6 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go @@ -0,0 +1,46 @@ +package swift + +import ( + "flag" +) + +// Config holds the config options for Swift backend +type Config struct { + AuthURL string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserID string `yaml:"user_id"` + Password string `yaml:"password"` + DomainID string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` +} + +// RegisterFlags registers the flags for Swift storage +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +// RegisterFlagsWithPrefix registers the flags for Swift storage with the provided prefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.AuthURL, prefix+"swift.auth-url", "", "OpenStack Swift authentication URL") + f.StringVar(&cfg.Username, prefix+"swift.username", "", "OpenStack Swift username.") + f.StringVar(&cfg.UserDomainName, prefix+"swift.user-domain-name", "", "OpenStack Swift user's domain name.") + f.StringVar(&cfg.UserDomainID, prefix+"swift.user-domain-id", "", "OpenStack Swift user's domain ID.") + 
f.StringVar(&cfg.UserID, prefix+"swift.user-id", "", "OpenStack Swift user ID.") + f.StringVar(&cfg.Password, prefix+"swift.password", "", "OpenStack Swift API key.") + f.StringVar(&cfg.DomainID, prefix+"swift.domain-id", "", "OpenStack Swift user's domain ID.") + f.StringVar(&cfg.DomainName, prefix+"swift.domain-name", "", "OpenStack Swift user's domain name.") + f.StringVar(&cfg.ProjectID, prefix+"swift.project-id", "", "OpenStack Swift project ID (v2,v3 auth only).") + f.StringVar(&cfg.ProjectName, prefix+"swift.project-name", "", "OpenStack Swift project name (v2,v3 auth only).") + f.StringVar(&cfg.ProjectDomainID, prefix+"swift.project-domain-id", "", "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.") + f.StringVar(&cfg.ProjectDomainName, prefix+"swift.project-domain-name", "", "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.") + f.StringVar(&cfg.RegionName, prefix+"swift.region-name", "", "OpenStack Swift Region to use (v2,v3 auth only).") + f.StringVar(&cfg.ContainerName, prefix+"swift.container-name", "", "Name of the OpenStack Swift container to put chunks in.") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/user_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/user_bucket_client.go similarity index 99% rename from vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/user_bucket_client.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/bucket/user_bucket_client.go index a06877f62534a..27d5b95e76a1f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/user_bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/user_bucket_client.go @@ -1,4 +1,4 @@ -package tsdb +package bucket import ( "context" diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go deleted file mode 100644 index 999fb550a5879..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go +++ /dev/null @@ -1,57 +0,0 @@ -package tsdb - -import ( - "context" - - "github.com/go-kit/kit/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/objstore" - - "github.com/cortexproject/cortex/pkg/storage/backend/azure" - "github.com/cortexproject/cortex/pkg/storage/backend/filesystem" - "github.com/cortexproject/cortex/pkg/storage/backend/gcs" - "github.com/cortexproject/cortex/pkg/storage/backend/s3" -) - -// NewBucketClient creates a new bucket client based on the configured backend -func NewBucketClient(ctx context.Context, cfg BucketConfig, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) { - switch cfg.Backend { - case BackendS3: - client, err = s3.NewBucketClient(cfg.S3, name, logger) - case BackendGCS: - client, err = gcs.NewBucketClient(ctx, cfg.GCS, name, logger) - case BackendAzure: - client, err = azure.NewBucketClient(cfg.Azure, name, logger) - case BackendFilesystem: - client, err = filesystem.NewBucketClient(cfg.Filesystem) - default: - return nil, errUnsupportedStorageBackend - } - - if err != nil { - return nil, err - } - - client = objstore.NewTracingBucket(bucketWithMetrics(client, name, reg)) - - // Wrap the client with any provided middleware - for _, wrap := range cfg.Middlewares { - client, err = wrap(client) - if err != nil { - return nil, err - } - } - - 
return client, nil -} - -func bucketWithMetrics(bucketClient objstore.Bucket, name string, reg prometheus.Registerer) objstore.Bucket { - if reg == nil { - return bucketClient - } - - return objstore.BucketWithMetrics( - "", // bucket label value - bucketClient, - prometheus.WrapRegistererWith(prometheus.Labels{"component": name}, reg)) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go new file mode 100644 index 0000000000000..a6600389bef6c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go @@ -0,0 +1,199 @@ +package bucketindex + +import ( + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/oklog/ulid" + "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + + cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + IndexFilename = "bucket-index.json" + IndexCompressedFilename = IndexFilename + ".gz" + IndexVersion1 = 1 + + SegmentsFormatUnknown = "" + + // SegmentsFormat1Based6Digits defined segments numbered with 6 digits numbers in a sequence starting from number 1 + // eg. (000001, 000002, 000003). + SegmentsFormat1Based6Digits = "1b6d" +) + +// Index contains all known blocks and markers of a tenant. +type Index struct { + // Version of the index format. + Version int `json:"version"` + + // List of complete blocks (partial blocks are excluded from the index). + Blocks []*Block `json:"blocks"` + + // List of block deletion marks. + BlockDeletionMarks []*BlockDeletionMark `json:"block_deletion_marks"` + + // UpdatedAt is a unix timestamp (seconds precision) of when the index has been updated + // (written in the storage) the last time. + UpdatedAt int64 `json:"updated_at"` +} + +// Block holds the information about a block in the index. +type Block struct { + // Block ID. + ID ulid.ULID `json:"block_id"` + + // MinTime and MaxTime specify the time range all samples in the block are in (millis precision). + MinTime int64 `json:"min_time"` + MaxTime int64 `json:"max_time"` + + // SegmentsFormat and SegmentsNum stores the format and number of chunks segments + // in the block, if they match a known pattern. We don't store the full segments + // files list in order to keep the index small. SegmentsFormat is empty if segments + // are unknown or don't match a known format. + SegmentsFormat string `json:"segments_format,omitempty"` + SegmentsNum int `json:"segments_num,omitempty"` + + // UploadedAt is a unix timestamp (seconds precision) of when the block has been completed to be uploaded + // to the storage. + UploadedAt int64 `json:"uploaded_at"` +} + +func (m *Block) GetUploadedAt() time.Time { + return time.Unix(m.UploadedAt, 0) +} + +// ThanosMeta returns a block meta based on the known information in the index. +// The returned meta doesn't include all original meta.json data but only a subset +// of it. 
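+// The tenant is attached to the returned meta through the cortex_tsdb.TenantIDExternalLabel
+// ("__org_id__") external label.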
+func (m *Block) ThanosMeta(userID string) metadata.Meta { + return metadata.Meta{ + BlockMeta: tsdb.BlockMeta{ + ULID: m.ID, + MinTime: m.MinTime, + MaxTime: m.MaxTime, + Version: metadata.TSDBVersion1, + }, + Thanos: metadata.Thanos{ + Version: metadata.ThanosVersion1, + Labels: map[string]string{ + cortex_tsdb.TenantIDExternalLabel: userID, + }, + SegmentFiles: m.thanosMetaSegmentFiles(), + }, + } +} + +func (m *Block) thanosMetaSegmentFiles() (files []string) { + if m.SegmentsFormat == SegmentsFormat1Based6Digits { + for i := 1; i <= m.SegmentsNum; i++ { + files = append(files, fmt.Sprintf("%06d", i)) + } + } + + return files +} + +func (m *Block) String() string { + minT := util.TimeFromMillis(m.MinTime).UTC() + maxT := util.TimeFromMillis(m.MaxTime).UTC() + + return fmt.Sprintf("%s (min time: %s max time: %s)", m.ID, minT.String(), maxT.String()) +} + +func BlockFromThanosMeta(meta metadata.Meta) *Block { + segmentsFormat, segmentsNum := detectBlockSegmentsFormat(meta) + + return &Block{ + ID: meta.ULID, + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + SegmentsFormat: segmentsFormat, + SegmentsNum: segmentsNum, + } +} + +func detectBlockSegmentsFormat(meta metadata.Meta) (string, int) { + if num, ok := detectBlockSegmentsFormat1Based6Digits(meta); ok { + return SegmentsFormat1Based6Digits, num + } + + return "", 0 +} + +func detectBlockSegmentsFormat1Based6Digits(meta metadata.Meta) (int, bool) { + // Check the (deprecated) SegmentFiles. + if len(meta.Thanos.SegmentFiles) > 0 { + for i, f := range meta.Thanos.SegmentFiles { + if fmt.Sprintf("%06d", i+1) != f { + return 0, false + } + } + return len(meta.Thanos.SegmentFiles), true + } + + // Check the Files. + if len(meta.Thanos.Files) > 0 { + num := 0 + for _, file := range meta.Thanos.Files { + if !strings.HasPrefix(file.RelPath, block.ChunksDirname+string(filepath.Separator)) { + continue + } + if fmt.Sprintf("%s%s%06d", block.ChunksDirname, string(filepath.Separator), num+1) != file.RelPath { + return 0, false + } + num++ + } + + if num > 0 { + return num, true + } + } + + return 0, false +} + +// BlockDeletionMark holds the information about a block's deletion mark in the index. +type BlockDeletionMark struct { + // Block ID. + ID ulid.ULID `json:"block_id"` + + // DeletionTime is a unix timestamp (seconds precision) of when the block was marked to be deleted. + DeletionTime int64 `json:"deletion_time"` +} + +func BlockDeletionMarkFromThanosMarker(mark *metadata.DeletionMark) *BlockDeletionMark { + return &BlockDeletionMark{ + ID: mark.ID, + DeletionTime: mark.DeletionTime, + } +} + +// Blocks holds a set of blocks in the index. No ordering guaranteed. 
+type Blocks []*Block + +func (s Blocks) GetULIDs() []ulid.ULID { + ids := make([]ulid.ULID, len(s)) + for i, m := range s { + ids[i] = m.ID + } + return ids +} + +func (s Blocks) String() string { + b := strings.Builder{} + + for idx, m := range s { + if idx > 0 { + b.WriteString(", ") + } + b.WriteString(m.String()) + } + + return b.String() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go new file mode 100644 index 0000000000000..a477a74250ff0 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go @@ -0,0 +1,38 @@ +package bucketindex + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/oklog/ulid" + "github.com/thanos-io/thanos/pkg/block/metadata" +) + +const ( + MarkersPathname = "markers" +) + +// BlockDeletionMarkFilepath returns the path, relative to the tenant's bucket location, +// of a block deletion mark in the bucket markers location. +func BlockDeletionMarkFilepath(blockID ulid.ULID) string { + return fmt.Sprintf("%s/%s-%s", MarkersPathname, blockID.String(), metadata.DeletionMarkFilename) +} + +// IsBlockDeletionMarkFilename returns whether the input filename matches the expected pattern +// of block deletion markers stored in the markers location. +func IsBlockDeletionMarkFilename(name string) (ulid.ULID, bool) { + parts := strings.SplitN(name, "-", 2) + if len(parts) != 2 { + return ulid.ULID{}, false + } + + // Ensure the 2nd part matches the block deletion mark filename. + if parts[1] != metadata.DeletionMarkFilename { + return ulid.ULID{}, false + } + + // Ensure the 1st part is a valid block ID. + id, err := ulid.Parse(filepath.Base(parts[0])) + return id, err == nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go new file mode 100644 index 0000000000000..1dcfdea981395 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go @@ -0,0 +1,121 @@ +package bucketindex + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "path" + + "github.com/oklog/ulid" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/objstore" +) + +// globalMarkersBucket is a bucket client which stores markers (eg. block deletion marks) in a per-tenant +// global location too. +type globalMarkersBucket struct { + parent objstore.Bucket +} + +// BucketWithGlobalMarkers wraps the input bucket into a bucket which also keeps track of markers +// in the global markers location. +func BucketWithGlobalMarkers(b objstore.Bucket) objstore.Bucket { + return &globalMarkersBucket{ + parent: b, + } +} + +// Upload implements objstore.Bucket. +func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader) error { + blockID, ok := b.isBlockDeletionMark(name) + if !ok { + return b.parent.Upload(ctx, name, r) + } + + // Read the marker. + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + // Upload it to the original location. + if err := b.parent.Upload(ctx, name, bytes.NewReader(body)); err != nil { + return err + } + + // Upload it to the global markers location too. 
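+ // For example, a per-block mark uploaded at <block ULID>/deletion-mark.json is also written
+ // at markers/<block ULID>-deletion-mark.json (the path built by BlockDeletionMarkFilepath).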
+ globalMarkPath := path.Clean(path.Join(path.Dir(name), "../", BlockDeletionMarkFilepath(blockID))) + return b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body)) +} + +// Delete implements objstore.Bucket. +func (b *globalMarkersBucket) Delete(ctx context.Context, name string) error { + // Call the parent. + if err := b.parent.Delete(ctx, name); err != nil { + return err + } + + // Delete the marker in the global markers location too. + if blockID, ok := b.isBlockDeletionMark(name); ok { + globalMarkPath := path.Clean(path.Join(path.Dir(name), "../", BlockDeletionMarkFilepath(blockID))) + if err := b.parent.Delete(ctx, globalMarkPath); err != nil { + if !b.parent.IsObjNotFoundErr(err) { + return err + } + } + } + + return nil +} + +// Name implements objstore.Bucket. +func (b *globalMarkersBucket) Name() string { + return b.parent.Name() +} + +// Close implements objstore.Bucket. +func (b *globalMarkersBucket) Close() error { + return b.parent.Close() +} + +// Iter implements objstore.Bucket. +func (b *globalMarkersBucket) Iter(ctx context.Context, dir string, f func(string) error) error { + return b.parent.Iter(ctx, dir, f) +} + +// Get implements objstore.Bucket. +func (b *globalMarkersBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + return b.parent.Get(ctx, name) +} + +// GetRange implements objstore.Bucket. +func (b *globalMarkersBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + return b.parent.GetRange(ctx, name, off, length) +} + +// Exists implements objstore.Bucket. +func (b *globalMarkersBucket) Exists(ctx context.Context, name string) (bool, error) { + return b.parent.Exists(ctx, name) +} + +// IsObjNotFoundErr implements objstore.Bucket. +func (b *globalMarkersBucket) IsObjNotFoundErr(err error) bool { + return b.parent.IsObjNotFoundErr(err) +} + +// Attributes implements objstore.Bucket. +func (b *globalMarkersBucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { + return b.parent.Attributes(ctx, name) +} + +func (b *globalMarkersBucket) isBlockDeletionMark(name string) (ulid.ULID, bool) { + if path.Base(name) != metadata.DeletionMarkFilename { + return ulid.ULID{}, false + } + + // Parse the block ID in the path. If there's not block ID, then it's not the per-block + // deletion mark. + return block.IsBlockDir(path.Dir(name)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go new file mode 100644 index 0000000000000..3ad3979847a50 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go @@ -0,0 +1,50 @@ +package bucketindex + +import ( + "compress/gzip" + "context" + "encoding/json" + + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" + + "github.com/cortexproject/cortex/pkg/storage/bucket" +) + +var ( + ErrIndexNotFound = errors.New("bucket index not found") + ErrIndexCorrupted = errors.New("bucket index corrupted") +) + +// ReadIndex reads, parses and returns a bucket index from the bucket. +func ReadIndex(ctx context.Context, bkt objstore.Bucket, userID string, logger log.Logger) (*Index, error) { + bkt = bucket.NewUserBucketClient(userID, bkt) + + // Get the bucket index. 
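+ // The index is stored gzip-compressed as "bucket-index.json.gz" (IndexCompressedFilename)
+ // under the tenant's prefix in the bucket.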
+ reader, err := bkt.Get(ctx, IndexCompressedFilename) + if err != nil { + if bkt.IsObjNotFoundErr(err) { + return nil, ErrIndexNotFound + } + return nil, errors.Wrap(err, "read bucket index") + } + defer runutil.CloseWithLogOnErr(logger, reader, "close bucket index reader") + + // Read all the content. + gzipReader, err := gzip.NewReader(reader) + if err != nil { + return nil, ErrIndexCorrupted + } + defer runutil.CloseWithLogOnErr(logger, gzipReader, "close bucket index gzip reader") + + // Deserialize it. + index := &Index{} + d := json.NewDecoder(gzipReader) + if err := d.Decode(index); err != nil { + return nil, ErrIndexCorrupted + } + + return index, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go new file mode 100644 index 0000000000000..55f37020a9a05 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go @@ -0,0 +1,255 @@ +package bucketindex + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "io/ioutil" + "path" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" + + "github.com/cortexproject/cortex/pkg/storage/bucket" + "github.com/cortexproject/cortex/pkg/util" +) + +var ( + ErrBlockMetaNotFound = block.ErrorSyncMetaNotFound + ErrBlockMetaCorrupted = block.ErrorSyncMetaCorrupted + ErrBlockDeletionMarkNotFound = errors.New("block deletion mark not found") + ErrBlockDeletionMarkCorrupted = errors.New("block deletion mark corrupted") +) + +// Writer is responsible to generate and write a bucket index. +type Writer struct { + bkt objstore.InstrumentedBucket + logger log.Logger +} + +func NewWriter(bkt objstore.Bucket, userID string, logger log.Logger) *Writer { + return &Writer{ + bkt: bucket.NewUserBucketClient(userID, bkt), + logger: util.WithUserID(userID, logger), + } +} + +// WriteIndex generates the bucket index and writes it to the storage. If the old index is not +// passed in input, then the bucket index will be generated from scratch. +func (w *Writer) WriteIndex(ctx context.Context, old *Index) (*Index, error) { + idx, err := w.GenerateIndex(ctx, old) + if err != nil { + return nil, errors.Wrap(err, "generate bucket index") + } + + // Marshal the index. + content, err := json.Marshal(idx) + if err != nil { + return nil, errors.Wrap(err, "marshal bucket index") + } + + // Compress it. + var gzipContent bytes.Buffer + gzip := gzip.NewWriter(&gzipContent) + gzip.Name = IndexFilename + + if _, err := gzip.Write(content); err != nil { + return nil, errors.Wrap(err, "gzip bucket index") + } + if err := gzip.Close(); err != nil { + return nil, errors.Wrap(err, "close gzip bucket index") + } + + // Upload the index to the storage. + if err := w.bkt.Upload(ctx, IndexCompressedFilename, &gzipContent); err != nil { + return nil, errors.Wrap(err, "upload bucket index") + } + + return idx, nil +} + +// GenerateIndex generates the bucket index and returns it, without storing it to the storage. +// If the old index is not passed in input, then the bucket index will be generated from scratch. 
+func (w *Writer) GenerateIndex(ctx context.Context, old *Index) (*Index, error) { + var oldBlocks []*Block + var oldBlockDeletionMarks []*BlockDeletionMark + + // Read the old index, if provided. + if old != nil { + oldBlocks = old.Blocks + oldBlockDeletionMarks = old.BlockDeletionMarks + } + + blocks, err := w.generateBlocksIndex(ctx, oldBlocks) + if err != nil { + return nil, err + } + + blockDeletionMarks, err := w.generateBlockDeletionMarksIndex(ctx, oldBlockDeletionMarks) + if err != nil { + return nil, err + } + + return &Index{ + Version: IndexVersion1, + Blocks: blocks, + BlockDeletionMarks: blockDeletionMarks, + UpdatedAt: time.Now().Unix(), + }, nil +} + +func (w *Writer) generateBlocksIndex(ctx context.Context, old []*Block) ([]*Block, error) { + out := make([]*Block, 0, len(old)) + discovered := map[ulid.ULID]struct{}{} + + // Find all blocks in the storage. + err := w.bkt.Iter(ctx, "", func(name string) error { + if id, ok := block.IsBlockDir(name); ok { + discovered[id] = struct{}{} + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "list blocks") + } + + // Since blocks are immutable, all blocks already existing in the index can just be copied. + for _, b := range old { + if _, ok := discovered[b.ID]; ok { + out = append(out, b) + delete(discovered, b.ID) + } + } + + // Remaining blocks are new ones and we have to fetch the meta.json for each of them, in order + // to find out if their upload has been completed (meta.json is uploaded last) and get the block + // information to store in the bucket index. + for id := range discovered { + b, err := w.generateBlockIndexEntry(ctx, id) + if errors.Is(err, ErrBlockMetaNotFound) { + level.Warn(w.logger).Log("msg", "skipped partial block when generating bucket index", "block", id.String()) + continue + } + if errors.Is(err, ErrBlockMetaCorrupted) { + level.Error(w.logger).Log("msg", "skipped block with corrupted meta.json when generating bucket index", "block", id.String(), "err", err) + continue + } + if err != nil { + return nil, err + } + + out = append(out, b) + } + + return out, nil +} + +func (w *Writer) generateBlockIndexEntry(ctx context.Context, id ulid.ULID) (*Block, error) { + metaFile := path.Join(id.String(), block.MetaFilename) + + // Get the block's meta.json file. + r, err := w.bkt.Get(ctx, metaFile) + if w.bkt.IsObjNotFoundErr(err) { + return nil, ErrBlockMetaNotFound + } + if err != nil { + return nil, errors.Wrapf(err, "get block meta file: %v", metaFile) + } + defer runutil.CloseWithLogOnErr(w.logger, r, "close get block meta file") + + metaContent, err := ioutil.ReadAll(r) + if err != nil { + return nil, errors.Wrapf(err, "read block meta file: %v", metaFile) + } + + // Unmarshal it. + m := metadata.Meta{} + if err := json.Unmarshal(metaContent, &m); err != nil { + return nil, errors.Wrapf(ErrBlockMetaCorrupted, "unmarshal block meta file %s: %v", metaFile, err) + } + + if m.Version != metadata.TSDBVersion1 { + return nil, errors.Errorf("unexpected block meta version: %s version: %d", metaFile, m.Version) + } + + block := BlockFromThanosMeta(m) + + // Get the meta.json attributes. + attrs, err := w.bkt.Attributes(ctx, metaFile) + if err != nil { + return nil, errors.Wrapf(err, "read meta file attributes: %v", metaFile) + } + + // Since the meta.json file is the last file of a block being uploaded and it's immutable + // we can safely assume that the last modified timestamp of the meta.json is the time when + // the block has completed to be uploaded. 
+ block.UploadedAt = attrs.LastModified.Unix() + + return block, nil +} + +func (w *Writer) generateBlockDeletionMarksIndex(ctx context.Context, old []*BlockDeletionMark) ([]*BlockDeletionMark, error) { + out := make([]*BlockDeletionMark, 0, len(old)) + discovered := map[ulid.ULID]struct{}{} + + // Find all markers in the storage. + err := w.bkt.Iter(ctx, MarkersPathname+"/", func(name string) error { + if blockID, ok := IsBlockDeletionMarkFilename(path.Base(name)); ok { + discovered[blockID] = struct{}{} + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "list block deletion marks") + } + + // Since deletion marks are immutable, all markers already existing in the index can just be copied. + for _, m := range old { + if _, ok := discovered[m.ID]; ok { + out = append(out, m) + delete(discovered, m.ID) + } + } + + // Remaining markers are new ones and we have to fetch them. + for id := range discovered { + m, err := w.generateBlockDeletionMarkIndexEntry(ctx, id) + if errors.Is(err, ErrBlockDeletionMarkNotFound) { + // This could happen if the block is permanently deleted between the "list objects" and now. + level.Warn(w.logger).Log("msg", "skipped missing block deletion mark when generating bucket index", "block", id.String()) + continue + } + if errors.Is(err, ErrBlockDeletionMarkCorrupted) { + level.Error(w.logger).Log("msg", "skipped corrupted block deletion mark when generating bucket index", "block", id.String(), "err", err) + continue + } + if err != nil { + return nil, err + } + + out = append(out, m) + } + + return out, nil +} + +func (w *Writer) generateBlockDeletionMarkIndexEntry(ctx context.Context, id ulid.ULID) (*BlockDeletionMark, error) { + m := metadata.DeletionMark{} + + if err := metadata.ReadMarker(ctx, w.logger, w.bkt, id.String(), &m); err != nil { + if errors.Is(err, metadata.ErrorUnmarshalMarker) { + return nil, errors.Wrap(ErrBlockDeletionMarkCorrupted, err.Error()) + } + return nil, err + } + + return BlockDeletionMarkFromThanosMarker(&m), nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go index 99f082e41ad7e..c4550ec899ac0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go @@ -58,7 +58,7 @@ func (cfg *ChunksCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.Int64Var(&cfg.SubrangeSize, prefix+"subrange-size", 16000, "Size of each subrange that bucket object is split into for better caching.") f.IntVar(&cfg.MaxGetRangeRequests, prefix+"max-get-range-requests", 3, "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching chunks. 
Zero or negative value = unlimited number of sub-requests.") - f.DurationVar(&cfg.AttributesTTL, prefix+"attributes-ttl", 24*time.Hour, "TTL for caching object attributes for chunks.") + f.DurationVar(&cfg.AttributesTTL, prefix+"attributes-ttl", 168*time.Hour, "TTL for caching object attributes for chunks.") f.DurationVar(&cfg.SubrangeTTL, prefix+"subrange-ttl", 24*time.Hour, "TTL for caching individual chunks subranges.") } @@ -76,6 +76,7 @@ type MetadataCacheConfig struct { MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"` MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"` MetafileMaxSize int `yaml:"metafile_max_size_bytes"` + MetafileAttributesTTL time.Duration `yaml:"metafile_attributes_ttl"` } func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { @@ -86,10 +87,11 @@ func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix f.DurationVar(&cfg.TenantsListTTL, prefix+"tenants-list-ttl", 15*time.Minute, "How long to cache list of tenants in the bucket.") f.DurationVar(&cfg.TenantBlocksListTTL, prefix+"tenant-blocks-list-ttl", 5*time.Minute, "How long to cache list of blocks for each tenant.") f.DurationVar(&cfg.ChunksListTTL, prefix+"chunks-list-ttl", 24*time.Hour, "How long to cache list of chunks for a block.") - f.DurationVar(&cfg.MetafileExistsTTL, prefix+"metafile-exists-ttl", 2*time.Hour, "How long to cache information that block metafile exists.") - f.DurationVar(&cfg.MetafileDoesntExistTTL, prefix+"metafile-doesnt-exist-ttl", 5*time.Minute, "How long to cache information that block metafile doesn't exist.") + f.DurationVar(&cfg.MetafileExistsTTL, prefix+"metafile-exists-ttl", 2*time.Hour, "How long to cache information that block metafile exists. Also used for user deletion mark file.") + f.DurationVar(&cfg.MetafileDoesntExistTTL, prefix+"metafile-doesnt-exist-ttl", 5*time.Minute, "How long to cache information that block metafile doesn't exist. 
Also used for user deletion mark file.") f.DurationVar(&cfg.MetafileContentTTL, prefix+"metafile-content-ttl", 24*time.Hour, "How long to cache content of the metafile.") f.IntVar(&cfg.MetafileMaxSize, prefix+"metafile-max-size-bytes", 1*1024*1024, "Maximum size of metafile content to cache in bytes.") + f.DurationVar(&cfg.MetafileAttributesTTL, prefix+"metafile-attributes-ttl", 168*time.Hour, "How long to cache attributes of the block metafile.") } func (cfg *MetadataCacheConfig) Validate() error { @@ -120,6 +122,7 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata cfg.CacheExists("metafile", metadataCache, isMetaFile, metadataConfig.MetafileExistsTTL, metadataConfig.MetafileDoesntExistTTL) cfg.CacheGet("metafile", metadataCache, isMetaFile, metadataConfig.MetafileMaxSize, metadataConfig.MetafileContentTTL, metadataConfig.MetafileExistsTTL, metadataConfig.MetafileDoesntExistTTL) + cfg.CacheAttributes("metafile", metadataCache, isMetaFile, metadataConfig.MetafileAttributesTTL) codec := snappyIterCodec{storecache.JSONIterCodec{}} cfg.CacheIter("tenants-iter", metadataCache, isTenantsDir, metadataConfig.TenantsListTTL, codec) @@ -159,7 +162,7 @@ var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`) func isTSDBChunkFile(name string) bool { return chunksMatcher.MatchString(name) } func isMetaFile(name string) bool { - return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename) + return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename) || strings.HasSuffix(name, "/"+TenantDeletionMarkPath) } func isTenantsDir(name string) bool { diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index e0493f097c4ce..5105a79d7162c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -2,36 +2,20 @@ package tsdb import ( "flag" - "fmt" "path/filepath" "strings" "time" "github.com/alecthomas/units" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/wal" "github.com/thanos-io/thanos/pkg/store" - "github.com/cortexproject/cortex/pkg/storage/backend/azure" - "github.com/cortexproject/cortex/pkg/storage/backend/filesystem" - "github.com/cortexproject/cortex/pkg/storage/backend/gcs" - "github.com/cortexproject/cortex/pkg/storage/backend/s3" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/storage/bucket" ) const ( - // BackendS3 is the value for the S3 storage backend - BackendS3 = "s3" - - // BackendGCS is the value for the GCS storage backend - BackendGCS = "gcs" - - // BackendAzure is the value for the Azure storage backend - BackendAzure = "azure" - - // BackendFilesystem is the value for the filesystem storge backend - BackendFilesystem = "filesystem" - // TenantIDExternalLabel is the external label containing the tenant ID, // set when shipping blocks to the storage. TenantIDExternalLabel = "__org_id__" @@ -43,38 +27,29 @@ const ( // ShardIDExternalLabel is the external label containing the shard ID // and can be used to shard blocks. ShardIDExternalLabel = "__shard_id__" + + // How often are open TSDBs checked for being idle and closed. 
+ DefaultCloseIdleTSDBInterval = 5 * time.Minute + + // How often to check for tenant deletion mark. + DeletionMarkCheckInterval = 1 * time.Hour ) // Validation errors var ( - supportedBackends = []string{BackendS3, BackendGCS, BackendAzure, BackendFilesystem} - - errUnsupportedStorageBackend = errors.New("unsupported TSDB storage backend") errInvalidShipConcurrency = errors.New("invalid TSDB ship concurrency") + errInvalidOpeningConcurrency = errors.New("invalid TSDB opening concurrency") errInvalidCompactionInterval = errors.New("invalid TSDB compaction interval") errInvalidCompactionConcurrency = errors.New("invalid TSDB compaction concurrency") + errInvalidWALSegmentSizeBytes = errors.New("invalid TSDB WAL segment size bytes") errInvalidStripeSize = errors.New("invalid TSDB stripe size") errEmptyBlockranges = errors.New("empty block ranges for TSDB") ) -// BucketConfig holds configuration for accessing long-term storage. -type BucketConfig struct { - Backend string `yaml:"backend"` - // Backends - S3 s3.Config `yaml:"s3"` - GCS gcs.Config `yaml:"gcs"` - Azure azure.Config `yaml:"azure"` - Filesystem filesystem.Config `yaml:"filesystem"` - - // Not used internally, meant to allow callers to wrap Buckets - // created using this config - Middlewares []func(objstore.Bucket) (objstore.Bucket, error) `yaml:"-"` -} - // BlocksStorageConfig holds the config information for the blocks storage. //nolint:golint type BlocksStorageConfig struct { - Bucket BucketConfig `yaml:",inline"` + Bucket bucket.Config `yaml:",inline"` BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the store-gateway synchronizes blocks stored in the bucket."` TSDB TSDBConfig `yaml:"tsdb"` } @@ -116,31 +91,13 @@ func (d *DurationList) ToMilliseconds() []int64 { return values } -// RegisterFlags registers the TSDB Backend -func (cfg *BucketConfig) RegisterFlags(f *flag.FlagSet) { - cfg.S3.RegisterFlags(f) - cfg.GCS.RegisterFlags(f) - cfg.Azure.RegisterFlags(f) - cfg.Filesystem.RegisterFlags(f) - - f.StringVar(&cfg.Backend, "blocks-storage.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) -} - // RegisterFlags registers the TSDB flags func (cfg *BlocksStorageConfig) RegisterFlags(f *flag.FlagSet) { - cfg.Bucket.RegisterFlags(f) + cfg.Bucket.RegisterFlagsWithPrefix("blocks-storage.", f) cfg.BucketStore.RegisterFlags(f) cfg.TSDB.RegisterFlags(f) } -func (cfg *BucketConfig) Validate() error { - if !util.StringsContain(supportedBackends, cfg.Backend) { - return errUnsupportedStorageBackend - } - - return nil -} - // Validate the config. func (cfg *BlocksStorageConfig) Validate() error { if err := cfg.Bucket.Validate(); err != nil { @@ -165,9 +122,12 @@ type TSDBConfig struct { HeadCompactionInterval time.Duration `yaml:"head_compaction_interval"` HeadCompactionConcurrency int `yaml:"head_compaction_concurrency"` HeadCompactionIdleTimeout time.Duration `yaml:"head_compaction_idle_timeout"` + HeadChunksWriteBufferSize int `yaml:"head_chunks_write_buffer_size_bytes"` StripeSize int `yaml:"stripe_size"` WALCompressionEnabled bool `yaml:"wal_compression_enabled"` + WALSegmentSizeBytes int `yaml:"wal_segment_size_bytes"` FlushBlocksOnShutdown bool `yaml:"flush_blocks_on_shutdown"` + CloseIdleTSDBTimeout time.Duration `yaml:"close_idle_tsdb_timeout"` // MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup. 
MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"` @@ -175,6 +135,9 @@ type TSDBConfig struct { // If true, user TSDBs are not closed on shutdown. Only for testing. // If false (default), user TSDBs are closed to make sure all resources are released and closed properly. KeepUserTSDBOpenOnShutdown bool `yaml:"-"` + + // How often to check for idle TSDBs for closing. DefaultCloseIdleTSDBInterval is not suitable for testing, so tests can override. + CloseIdleTSDBInterval time.Duration `yaml:"-"` } // RegisterFlags registers the TSDBConfig flags. @@ -192,9 +155,12 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.HeadCompactionInterval, "blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. Must be greater than 0 and max 5 minutes.") f.IntVar(&cfg.HeadCompactionConcurrency, "blocks-storage.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block") f.DurationVar(&cfg.HeadCompactionIdleTimeout, "blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.") + f.IntVar(&cfg.HeadChunksWriteBufferSize, "blocks-storage.tsdb.head-chunks-write-buffer-size-bytes", chunks.DefaultWriteBufferSize, "The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.") f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") + f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wal.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).") f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") + f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 0, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.") } // Validate the config. 
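For illustration only (not part of the vendored change), a minimal sketch of setting the TSDB flags added above on a BlocksStorageConfig; the FlagSet name and the override values are arbitrary assumptions, and Validate() applies the checks added in the hunk below.

package main

import (
    "flag"

    "github.com/cortexproject/cortex/pkg/storage/tsdb"
)

func main() {
    fs := flag.NewFlagSet("example", flag.ContinueOnError)

    // Register all blocks storage flags, including the newly added TSDB ones.
    cfg := tsdb.BlocksStorageConfig{}
    cfg.RegisterFlags(fs)

    // Override a few of the new flags: 64 MiB WAL segments, close idle TSDBs after 13h,
    // 1 MiB head chunks write buffer.
    _ = fs.Parse([]string{
        "-blocks-storage.tsdb.wal-segment-size-bytes=67108864",
        "-blocks-storage.tsdb.close-idle-tsdb-timeout=13h",
        "-blocks-storage.tsdb.head-chunks-write-buffer-size-bytes=1048576",
    })

    // A non-positive WAL segment size, or a write buffer size that is not a multiple of 1024
    // within the allowed range, is rejected by the validation added below.
    if err := cfg.Validate(); err != nil {
        panic(err)
    }
}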
@@ -203,6 +169,10 @@ func (cfg *TSDBConfig) Validate() error { return errInvalidShipConcurrency } + if cfg.MaxTSDBOpeningConcurrencyOnStartup <= 0 { + return errInvalidOpeningConcurrency + } + if cfg.HeadCompactionInterval <= 0 || cfg.HeadCompactionInterval > 5*time.Minute { return errInvalidCompactionInterval } @@ -211,6 +181,10 @@ func (cfg *TSDBConfig) Validate() error { return errInvalidCompactionConcurrency } + if cfg.HeadChunksWriteBufferSize < chunks.MinWriteBufferSize || cfg.HeadChunksWriteBufferSize > chunks.MaxWriteBufferSize || cfg.HeadChunksWriteBufferSize%1024 != 0 { + return errors.Errorf("head chunks write buffer size must be a multiple of 1024 between %d and %d", chunks.MinWriteBufferSize, chunks.MaxWriteBufferSize) + } + if cfg.StripeSize <= 1 || (cfg.StripeSize&(cfg.StripeSize-1)) != 0 { // ensure stripe size is a positive power of 2 return errInvalidStripeSize } @@ -219,6 +193,10 @@ func (cfg *TSDBConfig) Validate() error { return errEmptyBlockranges } + if cfg.WALSegmentSizeBytes <= 0 { + return errInvalidWALSegmentSizeBytes + } + return nil } @@ -243,6 +221,11 @@ type BucketStoreConfig struct { MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` + // Controls whether index-header lazy loading is enabled. This config option is hidden + // while it is marked as experimental. + IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled" doc:"hidden"` + IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout" doc:"hidden"` + // Controls what is the ratio of postings offsets store will hold in memory. // Larger value will keep less offsets, which will increase CPU cycles needed for query touching those postings. // It's meant for setups that want low baseline memory pressure and where less traffic is expected. @@ -269,6 +252,8 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ "Default is 6h, half of the default value for -compactor.deletion-delay.") f.IntVar(&cfg.PostingOffsetsInMemSampling, "blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") + f.BoolVar(&cfg.IndexHeaderLazyLoadingEnabled, "blocks-storage.bucket-store.index-header-lazy-loading-enabled", false, "If enabled, store-gateway will lazy load an index-header only once required by a query.") + f.DurationVar(&cfg.IndexHeaderLazyLoadingIdleTimeout, "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout", 20*time.Minute, "If index-header lazy loading is enabled and this setting is > 0, the store-gateway will offload unused index-headers after 'idle timeout' inactivity.") } // Validate the config. 
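For illustration only (not part of the vendored change), a minimal sketch of building an object storage client through the new pkg/storage/bucket API introduced above; the filesystem backend, the directory and the "example" component name are arbitrary example values.

package main

import (
    "context"

    "github.com/go-kit/kit/log"
    "github.com/prometheus/client_golang/prometheus"

    "github.com/cortexproject/cortex/pkg/storage/bucket"
)

func main() {
    cfg := bucket.Config{Backend: bucket.Filesystem}
    cfg.Filesystem.Directory = "/tmp/cortex-blocks"

    // Validate rejects unsupported backends and, for S3, unsupported signature versions.
    if err := cfg.Validate(); err != nil {
        panic(err)
    }

    // NewClient picks the configured backend, wraps it with tracing and per-component
    // metrics, and applies any configured middlewares.
    client, err := bucket.NewClient(context.Background(), cfg, "example", log.NewNopLogger(), prometheus.NewRegistry())
    if err != nil {
        panic(err)
    }
    defer client.Close()
}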
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go index cbe8c3520d43b..aac9c60b9d99e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go @@ -49,7 +49,7 @@ func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.StringVar(&cfg.Backend, prefix+"backend", IndexCacheBackendDefault, fmt.Sprintf("The index cache backend type. Supported values: %s.", strings.Join(supportedIndexCacheBackends, ", "))) - f.BoolVar(&cfg.PostingsCompression, prefix+"postings-compression-enabled", false, "Compress postings before storing them to postings cache.") + f.BoolVar(&cfg.PostingsCompression, prefix+"postings-compression-enabled", false, "Deprecated: compress postings before storing them to postings cache. This option is unused and postings compression is always enabled.") // TODO remove in v1.8.0. cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.") cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go index 8b3fd2e307a0c..60c1a4e2861d4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go @@ -27,7 +27,7 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefi f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 50, "The maximum number of concurrent asynchronous operations can occur.") f.IntVar(&cfg.MaxAsyncBufferSize, prefix+"max-async-buffer-size", 10000, "The maximum number of enqueued asynchronous operations allowed.") f.IntVar(&cfg.MaxGetMultiConcurrency, prefix+"max-get-multi-concurrency", 100, "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.") - f.IntVar(&cfg.MaxGetMultiBatchSize, prefix+"max-get-multi-batch-size", 0, "The maximum number of keys a single underlying get operation should run. If more keys are specified, internally keys are splitted into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.") + f.IntVar(&cfg.MaxGetMultiBatchSize, prefix+"max-get-multi-batch-size", 0, "The maximum number of keys a single underlying get operation should run. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.") f.IntVar(&cfg.MaxItemSize, prefix+"max-item-size", 1024*1024, "The maximum size of an item stored in memcached. Bigger items are not stored. 
If set to 0, no maximum size is enforced.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go new file mode 100644 index 0000000000000..1d4872323ef6f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go @@ -0,0 +1,40 @@ +package tsdb + +import ( + "bytes" + "context" + "encoding/json" + "path" + "time" + + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" +) + +// Relative to user-specific prefix. +const TenantDeletionMarkPath = "markers/tenant-deletion-mark.json" + +type TenantDeletionMark struct { + // Unix timestamp when deletion marker was created. + DeletionTime int64 `json:"deletion_time"` +} + +// Checks for deletion mark for tenant. Errors other than "object not found" are returned. +func TenantDeletionMarkExists(ctx context.Context, bkt objstore.BucketReader, userID string) (bool, error) { + markerFile := path.Join(userID, TenantDeletionMarkPath) + + return bkt.Exists(ctx, markerFile) +} + +// Uploads deletion mark to the tenant "directory". +func WriteTenantDeletionMark(ctx context.Context, bkt objstore.Bucket, userID string) error { + m := &TenantDeletionMark{DeletionTime: time.Now().Unix()} + + data, err := json.Marshal(m) + if err != nil { + return errors.Wrap(err, "serialize tenant deletion mark") + } + + markerFile := path.Join(userID, TenantDeletionMarkPath) + return errors.Wrap(bkt.Upload(ctx, markerFile, bytes.NewReader(data)), "upload tenant deletion mark") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go new file mode 100644 index 0000000000000..825a212da2d45 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go @@ -0,0 +1,72 @@ +package tsdb + +import ( + "context" + "strings" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/thanos-io/thanos/pkg/objstore" +) + +// AllUsers returns true to each call and should be used whenever the UsersScanner should not filter out +// any user due to sharding. +func AllUsers(_ string) (bool, error) { + return true, nil +} + +type UsersScanner struct { + bucketClient objstore.Bucket + logger log.Logger + isOwned func(userID string) (bool, error) +} + +func NewUsersScanner(bucketClient objstore.Bucket, isOwned func(userID string) (bool, error), logger log.Logger) *UsersScanner { + return &UsersScanner{ + bucketClient: bucketClient, + logger: logger, + isOwned: isOwned, + } +} + +// ScanUsers returns a fresh list of users found in the storage, that are not marked for deletion, +// and list of users marked for deletion. +// +// If sharding is enabled, returned lists contains only the users owned by this instance. +func (s *UsersScanner) ScanUsers(ctx context.Context) (users, markedForDeletion []string, err error) { + err = s.bucketClient.Iter(ctx, "", func(entry string) error { + users = append(users, strings.TrimSuffix(entry, "/")) + return nil + }) + if err != nil { + return nil, nil, err + } + + // Check users for being owned by instance, and split users into non-deleted and deleted. + // We do these checks after listing all users, to improve cacheability of Iter (result is only cached at the end of Iter call). + for ix := 0; ix < len(users); { + userID := users[ix] + + // Check if it's owned by this instance. 
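+ // Non-owned users and users marked for deletion are removed from the slice in place,
+ // so ix is only advanced when the current user is kept.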
+ owned, err := s.isOwned(userID) + if err != nil { + level.Warn(s.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err) + } else if !owned { + users = append(users[:ix], users[ix+1:]...) + continue + } + + deletionMarkExists, err := TenantDeletionMarkExists(ctx, s.bucketClient, userID) + if err != nil { + level.Warn(s.logger).Log("msg", "unable to check if user is marked for deletion", "user", userID, "err", err) + } else if deletionMarkExists { + users = append(users[:ix], users[ix+1:]...) + markedForDeletion = append(markedForDeletion, userID) + continue + } + + ix++ + } + + return users, markedForDeletion, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go index 35cbd9992c819..224101db878fe 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go @@ -1,8 +1,6 @@ package storegateway import ( - "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/util" @@ -11,9 +9,7 @@ import ( // BucketStoreMetrics aggregates metrics exported by Thanos Bucket Store // and re-exports those aggregates as Cortex metrics. type BucketStoreMetrics struct { - // Maps userID -> registry - regsMu sync.Mutex - regs map[string]*prometheus.Registry + regs *util.UserRegistries // exported metrics, gathered from Thanos BucketStore blockLoads *prometheus.Desc @@ -40,11 +36,17 @@ type BucketStoreMetrics struct { seriesFetchDuration *prometheus.Desc postingsFetchDuration *prometheus.Desc + + indexHeaderLazyLoadCount *prometheus.Desc + indexHeaderLazyLoadFailedCount *prometheus.Desc + indexHeaderLazyUnloadCount *prometheus.Desc + indexHeaderLazyUnloadFailedCount *prometheus.Desc + indexHeaderLazyLoadDuration *prometheus.Desc } func NewBucketStoreMetrics() *BucketStoreMetrics { return &BucketStoreMetrics{ - regs: map[string]*prometheus.Registry{}, + regs: util.NewUserRegistries(), blockLoads: prometheus.NewDesc( "cortex_bucket_store_block_loads_total", @@ -137,25 +139,32 @@ func NewBucketStoreMetrics() *BucketStoreMetrics { "cortex_bucket_store_cached_postings_fetch_duration_seconds", "Time it takes to fetch postings to respond a request sent to store-gateway. 
It includes both the time to fetch it from cache and from storage in case of cache misses.", nil, nil), + + indexHeaderLazyLoadCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_load_total", + "Total number of index-header lazy load operations.", + nil, nil), + indexHeaderLazyLoadFailedCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_load_failed_total", + "Total number of failed index-header lazy load operations.", + nil, nil), + indexHeaderLazyUnloadCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_unload_total", + "Total number of index-header lazy unload operations.", + nil, nil), + indexHeaderLazyUnloadFailedCount: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_unload_failed_total", + "Total number of failed index-header lazy unload operations.", + nil, nil), + indexHeaderLazyLoadDuration: prometheus.NewDesc( + "cortex_bucket_store_indexheader_lazy_load_duration_seconds", + "Duration of the index-header lazy loading in seconds.", + nil, nil), } } func (m *BucketStoreMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { - m.regsMu.Lock() - m.regs[user] = reg - m.regsMu.Unlock() -} - -func (m *BucketStoreMetrics) registries() map[string]*prometheus.Registry { - regs := map[string]*prometheus.Registry{} - - m.regsMu.Lock() - defer m.regsMu.Unlock() - for uid, r := range m.regs { - regs[uid] = r - } - - return regs + m.regs.AddUserRegistry(user, reg) } func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { @@ -183,10 +192,16 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.seriesFetchDuration out <- m.postingsFetchDuration + + out <- m.indexHeaderLazyLoadCount + out <- m.indexHeaderLazyLoadFailedCount + out <- m.indexHeaderLazyUnloadCount + out <- m.indexHeaderLazyUnloadFailedCount + out <- m.indexHeaderLazyLoadDuration } func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { - data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.registries()) + data := m.regs.BuildMetricFamiliesPerUser() data.SendSumOfCounters(out, m.blockLoads, "thanos_bucket_store_block_loads_total") data.SendSumOfCounters(out, m.blockLoadFailures, "thanos_bucket_store_block_load_failures_total") @@ -215,4 +230,10 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfHistograms(out, m.seriesFetchDuration, "thanos_bucket_store_cached_series_fetch_duration_seconds") data.SendSumOfHistograms(out, m.postingsFetchDuration, "thanos_bucket_store_cached_postings_fetch_duration_seconds") + + data.SendSumOfCounters(out, m.indexHeaderLazyLoadCount, "thanos_bucket_store_indexheader_lazy_load_total") + data.SendSumOfCounters(out, m.indexHeaderLazyLoadFailedCount, "thanos_bucket_store_indexheader_lazy_load_failed_total") + data.SendSumOfCounters(out, m.indexHeaderLazyUnloadCount, "thanos_bucket_store_indexheader_lazy_unload_total") + data.SendSumOfCounters(out, m.indexHeaderLazyUnloadFailedCount, "thanos_bucket_store_indexheader_lazy_unload_failed_total") + data.SendSumOfHistograms(out, m.indexHeaderLazyLoadDuration, "thanos_bucket_store_indexheader_lazy_load_duration_seconds") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go index a1237b77c5ebf..cecd68e1971b1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go @@ -26,6 +26,7 @@ import ( 
"github.com/weaveworks/common/logging" "google.golang.org/grpc/metadata" + "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/spanlogger" @@ -154,7 +155,7 @@ func (u *BucketStores) syncUsersBlocks(ctx context.Context, f func(context.Conte wg := &sync.WaitGroup{} jobs := make(chan job) - errs := tsdb_errors.MultiError{} + errs := tsdb_errors.NewMulti() errsMx := sync.Mutex{} // Scan users in the bucket. In case of error, it may return a subset of users. If we sync a subset of users @@ -247,6 +248,42 @@ func (u *BucketStores) Series(req *storepb.SeriesRequest, srv storepb.Store_Seri }) } +// LabelNames implements the Storegateway proto service. +func (u *BucketStores) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + spanLog, spanCtx := spanlogger.New(ctx, "BucketStores.LabelNames") + defer spanLog.Span.Finish() + + userID := getUserIDFromGRPCContext(spanCtx) + if userID == "" { + return nil, fmt.Errorf("no userID") + } + + store := u.getStore(userID) + if store == nil { + return &storepb.LabelNamesResponse{}, nil + } + + return store.LabelNames(ctx, req) +} + +// LabelValues implements the Storegateway proto service. +func (u *BucketStores) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + spanLog, spanCtx := spanlogger.New(ctx, "BucketStores.LabelValues") + defer spanLog.Span.Finish() + + userID := getUserIDFromGRPCContext(spanCtx) + if userID == "" { + return nil, fmt.Errorf("no userID") + } + + store := u.getStore(userID) + if store == nil { + return &storepb.LabelValuesResponse{}, nil + } + + return store.LabelValues(ctx, req) +} + // scanUsers in the bucket and return the list of found users. If an error occurs while // iterating the bucket, it may return both an error and a subset of the users in the bucket. func (u *BucketStores) scanUsers(ctx context.Context) ([]string, error) { @@ -291,7 +328,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro level.Info(userLogger).Log("msg", "creating user bucket store") - userBkt := tsdb.NewUserBucketClient(userID, u.bucket) + userBkt := bucket.NewUserBucketClient(userID, u.bucket) // Wrap the bucket reader to skip iterating the bucket at all if the user doesn't // belong to the store-gateway shard. We need to run the BucketStore synching anyway @@ -310,7 +347,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro // The sharding strategy filter MUST be before the ones we create here (order matters). append([]block.MetadataFilter{NewShardingMetadataFilterAdapter(userID, u.shardingStrategy)}, []block.MetadataFilter{ block.NewConsistencyDelayMetaFilter(userLogger, u.cfg.BucketStore.ConsistencyDelay, fetcherReg), - block.NewIgnoreDeletionMarkFilter(userLogger, userBkt, u.cfg.BucketStore.IgnoreDeletionMarksDelay), + block.NewIgnoreDeletionMarkFilter(userLogger, userBkt, u.cfg.BucketStore.IgnoreDeletionMarksDelay, u.cfg.BucketStore.MetaSyncConcurrency), // The duplicate filter has been intentionally omitted because it could cause troubles with // the consistency check done on the querier. 
The duplicate filter removes redundant blocks // but if the store-gateway removes redundant blocks before the querier discovers them, the @@ -344,9 +381,10 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro u.cfg.BucketStore.BlockSyncConcurrency, nil, // Do not limit timerange. false, // No need to enable backward compatibility with Thanos pre 0.8.0 queriers - u.cfg.BucketStore.IndexCache.PostingsCompression, u.cfg.BucketStore.PostingOffsetsInMemSampling, true, // Enable series hints. + u.cfg.BucketStore.IndexHeaderLazyLoadingEnabled, + u.cfg.BucketStore.IndexHeaderLazyLoadingIdleTimeout, ) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go index 3ae3458131ded..f94e08d43f944 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go @@ -19,6 +19,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" "github.com/cortexproject/cortex/pkg/util" @@ -270,7 +271,7 @@ func (g *StoreGateway) running(ctx context.Context) error { defer syncTicker.Stop() if g.gatewayCfg.ShardingEnabled { - ringLastState, _ = g.ring.GetAll(ring.BlocksSync) // nolint:errcheck + ringLastState, _ = g.ring.GetAllHealthy(ring.BlocksSync) // nolint:errcheck ringTicker := time.NewTicker(util.DurationWithJitter(g.gatewayCfg.ShardingRing.RingCheckPeriod, 0.2)) defer ringTicker.Stop() ringTickerChan = ringTicker.C @@ -283,7 +284,7 @@ func (g *StoreGateway) running(ctx context.Context) error { case <-ringTickerChan: // We ignore the error because in case of error it will return an empty // replication set which we use to compare with the previous state. - currRingState, _ := g.ring.GetAll(ring.BlocksSync) // nolint:errcheck + currRingState, _ := g.ring.GetAllHealthy(ring.BlocksSync) // nolint:errcheck if ring.HasReplicationSetChanged(ringLastState, currRingState) { ringLastState = currRingState @@ -319,6 +320,16 @@ func (g *StoreGateway) Series(req *storepb.SeriesRequest, srv storegatewaypb.Sto return g.stores.Series(req, srv) } +// LabelNames implements the Storegateway proto service. +func (g *StoreGateway) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + return g.stores.LabelNames(ctx, req) +} + +// LabelValues implements the Storegateway proto service. +func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + return g.stores.LabelValues(ctx, req) +} + func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { // When we initialize the store-gateway instance in the ring we want to start from // a clean situation, so whatever is the state we set it JOINING, while we keep existing @@ -343,7 +354,7 @@ func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring. 
} func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { - bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.Bucket, "store-gateway", logger, reg) + bucketClient, err := bucket.NewClient(context.Background(), cfg.Bucket, "store-gateway", logger, reg) if err != nil { return nil, errors.Wrap(err, "create bucket client") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go index 1d02d1e996143..113dd61623517 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go @@ -1,8 +1,6 @@ package storegateway import ( - "sync" - "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/util" @@ -11,9 +9,7 @@ import ( // This struct aggregates metrics exported by Thanos MetaFetcher // and re-exports those aggregates as Cortex metrics. type MetadataFetcherMetrics struct { - // Maps userID -> registry - regsMu sync.Mutex - regs map[string]*prometheus.Registry + regs *util.UserRegistries // Exported metrics, gathered from Thanos MetaFetcher syncs *prometheus.Desc @@ -29,7 +25,7 @@ type MetadataFetcherMetrics struct { func NewMetadataFetcherMetrics() *MetadataFetcherMetrics { return &MetadataFetcherMetrics{ - regs: map[string]*prometheus.Registry{}, + regs: util.NewUserRegistries(), syncs: prometheus.NewDesc( "cortex_blocks_meta_syncs_total", @@ -55,25 +51,10 @@ func NewMetadataFetcherMetrics() *MetadataFetcherMetrics { } func (m *MetadataFetcherMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { - m.regsMu.Lock() - m.regs[user] = reg - m.regsMu.Unlock() -} - -func (m *MetadataFetcherMetrics) registries() map[string]*prometheus.Registry { - regs := map[string]*prometheus.Registry{} - - m.regsMu.Lock() - defer m.regsMu.Unlock() - for uid, r := range m.regs { - regs[uid] = r - } - - return regs + m.regs.AddUserRegistry(user, reg) } func (m *MetadataFetcherMetrics) Describe(out chan<- *prometheus.Desc) { - out <- m.syncs out <- m.syncFailures out <- m.syncDuration @@ -82,7 +63,7 @@ func (m *MetadataFetcherMetrics) Describe(out chan<- *prometheus.Desc) { } func (m *MetadataFetcherMetrics) Collect(out chan<- prometheus.Metric) { - data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.registries()) + data := m.regs.BuildMetricFamiliesPerUser() data.SendSumOfCounters(out, m.syncs, "blocks_meta_syncs_total") data.SendSumOfCounters(out, m.syncFailures, "blocks_meta_sync_failures_total") diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go index 95c4c897f0afc..fa5913faf44ab 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go @@ -28,20 +28,24 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 204 bytes of a gzipped FileDescriptorProto + // 257 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x4f, 0x2c, 0x49, 0x2d, 
0x4f, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4, 0xcc, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x4b, 0x32, 0x12, 0xf3, 0xf2, 0x8b, 0x75, 0x33, 0xf3, 0xa1, 0x2c, 0xfd, 0x82, 0xec, 0x74, 0xfd, 0xe2, 0x92, 0xfc, 0xa2, - 0x54, 0x08, 0x59, 0x90, 0xa4, 0x5f, 0x54, 0x90, 0x0c, 0x31, 0xc3, 0xc8, 0x93, 0x8b, 0x27, 0x18, - 0x24, 0xe8, 0x0e, 0x31, 0x4a, 0xc8, 0x92, 0x8b, 0x2d, 0x38, 0xb5, 0x28, 0x33, 0xb5, 0x58, 0x48, - 0x54, 0x0f, 0xa2, 0x5d, 0x0f, 0xc2, 0x0f, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0x12, 0x43, - 0x17, 0x2e, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x35, 0x60, 0x74, 0x72, 0xb9, 0xf0, 0x50, 0x8e, 0xe1, - 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, - 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, - 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, - 0x2c, 0xc7, 0x10, 0xc5, 0x07, 0x76, 0x13, 0xdc, 0x27, 0x49, 0x6c, 0x60, 0x77, 0x19, 0x03, 0x02, - 0x00, 0x00, 0xff, 0xff, 0xc5, 0x38, 0xd0, 0xf6, 0xec, 0x00, 0x00, 0x00, + 0x54, 0x08, 0x59, 0x90, 0xa4, 0x5f, 0x54, 0x90, 0x0c, 0x31, 0xc3, 0xe8, 0x1a, 0x23, 0x17, 0x4f, + 0x30, 0x48, 0xd4, 0x1d, 0x62, 0x96, 0x90, 0x25, 0x17, 0x5b, 0x70, 0x6a, 0x51, 0x66, 0x6a, 0xb1, + 0x90, 0xa8, 0x1e, 0x44, 0xbf, 0x1e, 0x84, 0x1f, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, + 0x86, 0x2e, 0x5c, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x6a, 0xc0, 0x28, 0xe4, 0xcc, 0xc5, 0xe5, 0x93, + 0x98, 0x94, 0x9a, 0xe3, 0x97, 0x98, 0x9b, 0x5a, 0x2c, 0x24, 0x09, 0x53, 0x87, 0x10, 0x83, 0x19, + 0x21, 0x85, 0x4d, 0x0a, 0x62, 0x8c, 0x90, 0x1b, 0x17, 0x37, 0x58, 0x34, 0x2c, 0x31, 0xa7, 0x34, + 0xb5, 0x58, 0x08, 0x55, 0x29, 0x44, 0x10, 0x66, 0x8c, 0x34, 0x56, 0x39, 0x88, 0x39, 0x4e, 0x2e, + 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, + 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xf8, 0xc0, 0x21, 0x04, 0x0f, 0xd7, 0x24, 0x36, + 0x70, 0x28, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xec, 0xe6, 0x0a, 0x7a, 0x01, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -64,6 +68,10 @@ type StoreGatewayClient interface { // // Series are sorted. Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) + // LabelNames returns all label names that is available. + LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) + // LabelValues returns all label values for given label name. + LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) } type storeGatewayClient struct { @@ -106,6 +114,24 @@ func (x *storeGatewaySeriesClient) Recv() (*storepb.SeriesResponse, error) { return m, nil } +func (c *storeGatewayClient) LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) { + out := new(storepb.LabelNamesResponse) + err := c.cc.Invoke(ctx, "/gatewaypb.StoreGateway/LabelNames", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeGatewayClient) LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) { + out := new(storepb.LabelValuesResponse) + err := c.cc.Invoke(ctx, "/gatewaypb.StoreGateway/LabelValues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // StoreGatewayServer is the server API for StoreGateway service. type StoreGatewayServer interface { // Series streams each Series for given label matchers and time range. @@ -116,6 +142,10 @@ type StoreGatewayServer interface { // // Series are sorted. Series(*storepb.SeriesRequest, StoreGateway_SeriesServer) error + // LabelNames returns all label names that is available. + LabelNames(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) + // LabelValues returns all label values for given label name. + LabelValues(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) } // UnimplementedStoreGatewayServer can be embedded to have forward compatible implementations. @@ -125,6 +155,12 @@ type UnimplementedStoreGatewayServer struct { func (*UnimplementedStoreGatewayServer) Series(req *storepb.SeriesRequest, srv StoreGateway_SeriesServer) error { return status.Errorf(codes.Unimplemented, "method Series not implemented") } +func (*UnimplementedStoreGatewayServer) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") +} +func (*UnimplementedStoreGatewayServer) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") +} func RegisterStoreGatewayServer(s *grpc.Server, srv StoreGatewayServer) { s.RegisterService(&_StoreGateway_serviceDesc, srv) @@ -151,10 +187,55 @@ func (x *storeGatewaySeriesServer) Send(m *storepb.SeriesResponse) error { return x.ServerStream.SendMsg(m) } +func _StoreGateway_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(storepb.LabelNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreGatewayServer).LabelNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gatewaypb.StoreGateway/LabelNames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreGatewayServer).LabelNames(ctx, req.(*storepb.LabelNamesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StoreGateway_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(storepb.LabelValuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreGatewayServer).LabelValues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gatewaypb.StoreGateway/LabelValues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreGatewayServer).LabelValues(ctx, req.(*storepb.LabelValuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _StoreGateway_serviceDesc = grpc.ServiceDesc{ ServiceName: 
"gatewaypb.StoreGateway", HandlerType: (*StoreGatewayServer)(nil), - Methods: []grpc.MethodDesc{}, + Methods: []grpc.MethodDesc{ + { + MethodName: "LabelNames", + Handler: _StoreGateway_LabelNames_Handler, + }, + { + MethodName: "LabelValues", + Handler: _StoreGateway_LabelValues_Handler, + }, + }, Streams: []grpc.StreamDesc{ { StreamName: "Series", diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto index fdde78fe87e1d..14e65859c2791 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto @@ -14,4 +14,10 @@ service StoreGateway { // // Series are sorted. rpc Series(thanos.SeriesRequest) returns (stream thanos.SeriesResponse); + + // LabelNames returns all label names that is available. + rpc LabelNames(thanos.LabelNamesRequest) returns (thanos.LabelNamesResponse); + + // LabelValues returns all label values for given label name. + rpc LabelValues(thanos.LabelValuesRequest) returns (thanos.LabelValuesResponse); } diff --git a/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go b/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go new file mode 100644 index 0000000000000..e5fbea25298c9 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go @@ -0,0 +1,132 @@ +package tenant + +import ( + "context" + "net/http" + "strings" + + "github.com/weaveworks/common/user" +) + +var defaultResolver Resolver = NewSingleResolver() + +// WithDefaultResolver updates the resolver used for the package methods. +func WithDefaultResolver(r Resolver) { + defaultResolver = r +} + +// TenantID returns exactly a single tenant ID from the context. It should be +// used when a certain endpoint should only support exactly a single +// tenant ID. It returns an error user.ErrNoOrgID if there is no tenant ID +// supplied or user.ErrTooManyOrgIDs if there are multiple tenant IDs present. +// +// ignore stutter warning +//nolint:golint +func TenantID(ctx context.Context) (string, error) { + return defaultResolver.TenantID(ctx) +} + +// TenantIDs returns all tenant IDs from the context. It should return +// normalized list of ordered and distinct tenant IDs (as produced by +// NormalizeTenantIDs). +// +// ignore stutter warning +//nolint:golint +func TenantIDs(ctx context.Context) ([]string, error) { + return defaultResolver.TenantIDs(ctx) +} + +type Resolver interface { + // TenantID returns exactly a single tenant ID from the context. It should be + // used when a certain endpoint should only support exactly a single + // tenant ID. It returns an error user.ErrNoOrgID if there is no tenant ID + // supplied or user.ErrTooManyOrgIDs if there are multiple tenant IDs present. + TenantID(context.Context) (string, error) + + // TenantIDs returns all tenant IDs from the context. It should return + // normalized list of ordered and distinct tenant IDs (as produced by + // NormalizeTenantIDs). + TenantIDs(context.Context) ([]string, error) +} + +// NewSingleResolver creates a tenant resolver, which restricts all requests to +// be using a single tenant only. This allows a wider set of characters to be +// used within the tenant ID and should not impose a breaking change. 
+func NewSingleResolver() *SingleResolver { + return &SingleResolver{} +} + +type SingleResolver struct { +} + +func (t *SingleResolver) TenantID(ctx context.Context) (string, error) { + //lint:ignore faillint wrapper around upstream method + return user.ExtractOrgID(ctx) +} + +func (t *SingleResolver) TenantIDs(ctx context.Context) ([]string, error) { + //lint:ignore faillint wrapper around upstream method + orgID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + return []string{orgID}, err +} + +type MultiResolver struct { +} + +// NewMultiResolver creates a tenant resolver, which allows request to have +// multiple tenant ids submitted separated by a '|' character. This enforces +// further limits on the character set allowed within tenants as detailed here: +// https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) +func NewMultiResolver() *MultiResolver { + return &MultiResolver{} +} + +func (t *MultiResolver) TenantID(ctx context.Context) (string, error) { + orgIDs, err := t.TenantIDs(ctx) + if err != nil { + return "", err + } + + if len(orgIDs) > 1 { + return "", user.ErrTooManyOrgIDs + } + + return orgIDs[0], nil +} + +func (t *MultiResolver) TenantIDs(ctx context.Context) ([]string, error) { + //lint:ignore faillint wrapper around upstream method + orgID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + orgIDs := strings.Split(orgID, tenantIDsLabelSeparator) + for _, orgID := range orgIDs { + if err := ValidTenantID(orgID); err != nil { + return nil, err + } + } + + return NormalizeTenantIDs(orgIDs), nil +} + +// ExtractTenantIDFromHTTPRequest extracts a single TenantID through a given +// resolver directly from a HTTP request. +func ExtractTenantIDFromHTTPRequest(req *http.Request) (string, context.Context, error) { + //lint:ignore faillint wrapper around upstream method + _, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) + if err != nil { + return "", nil, err + } + + tenantID, err := defaultResolver.TenantID(ctx) + if err != nil { + return "", nil, err + } + + return tenantID, ctx, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go b/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go new file mode 100644 index 0000000000000..102091c78b613 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go @@ -0,0 +1,89 @@ +package tenant + +import ( + "errors" + "fmt" + "sort" +) + +var ( + errTenantIDTooLong = errors.New("tenant ID is too long: max 150 characters") +) + +type errTenantIDUnsupportedCharacter struct { + pos int + tenantID string +} + +func (e *errTenantIDUnsupportedCharacter) Error() string { + return fmt.Sprintf( + "tenant ID '%s' contains unsupported character '%c'", + e.tenantID, + e.tenantID[e.pos], + ) +} + +const tenantIDsLabelSeparator = "|" + +// NormalizeTenantIDs is creating a normalized form by sortiing and de-duplicating the list of tenantIDs +func NormalizeTenantIDs(tenantIDs []string) []string { + sort.Strings(tenantIDs) + + count := len(tenantIDs) + if count <= 1 { + return tenantIDs + } + + posOut := 1 + for posIn := 1; posIn < count; posIn++ { + if tenantIDs[posIn] != tenantIDs[posIn-1] { + tenantIDs[posOut] = tenantIDs[posIn] + posOut++ + } + } + + return tenantIDs[0:posOut] +} + +// ValidTenantID +func ValidTenantID(s string) error { + // check if it contains invalid runes + for pos, r := range s { + if !isSupported(r) { + return &errTenantIDUnsupportedCharacter{ + tenantID: s, + pos: pos, + } + } + } + + if len(s) > 150 { + 
return errTenantIDTooLong + } + + return nil +} + +// this checks if a rune is supported in tenant IDs (according to +// https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) +func isSupported(c rune) bool { + // characters + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') { + return true + } + + // digits + if '0' <= c && c <= '9' { + return true + } + + // special + return c == '!' || + c == '-' || + c == '_' || + c == '.' || + c == '*' || + c == '\'' || + c == '(' || + c == ')' +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go new file mode 100644 index 0000000000000..7110fb8c4a68f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go @@ -0,0 +1,25 @@ +package concurrency + +import ( + "bytes" + "sync" +) + +type SyncBuffer struct { + mu sync.Mutex + buf bytes.Buffer +} + +func (sb *SyncBuffer) Write(p []byte) (n int, err error) { + sb.mu.Lock() + defer sb.mu.Unlock() + + return sb.buf.Write(p) +} + +func (sb *SyncBuffer) String() string { + sb.mu.Lock() + defer sb.mu.Unlock() + + return sb.buf.String() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go new file mode 100644 index 0000000000000..9a18ade8dc0b1 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go @@ -0,0 +1,64 @@ +package concurrency + +import ( + "context" + "sync" + + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" +) + +// ForEachUser runs the provided userFunc for each userIDs up to concurrency concurrent workers. +// In case userFunc returns error, it will continue to process remaining users but returns an +// error with all errors userFunc has returned. +func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFunc func(ctx context.Context, userID string) error) error { + wg := sync.WaitGroup{} + ch := make(chan string) + + // Keep track of all errors occurred. + errs := tsdb_errors.NewMulti() + errsMx := sync.Mutex{} + + for ix := 0; ix < concurrency; ix++ { + wg.Add(1) + go func() { + defer wg.Done() + + for userID := range ch { + // Ensure the context has not been canceled (ie. shutdown has been triggered). + if ctx.Err() != nil { + break + } + + if err := userFunc(ctx, userID); err != nil { + errsMx.Lock() + errs.Add(err) + errsMx.Unlock() + } + } + }() + } + +sendLoop: + for _, userID := range userIDs { + select { + case ch <- userID: + // ok + case <-ctx.Done(): + // don't start new tasks. + break sendLoop + } + } + + close(ch) + + // wait for ongoing workers to finish. + wg.Wait() + + if ctx.Err() != nil { + return ctx.Err() + } + + errsMx.Lock() + defer errsMx.Unlock() + return errs.Err() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/dns_watcher.go b/vendor/github.com/cortexproject/cortex/pkg/util/dns_watcher.go new file mode 100644 index 0000000000000..d4af88f57ba6c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/dns_watcher.go @@ -0,0 +1,82 @@ +package util + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "google.golang.org/grpc/naming" + + "github.com/cortexproject/cortex/pkg/util/services" +) + +// Notifications about address resolution. All notifications are sent on the same goroutine. +type DNSNotifications interface { + // New address has been discovered by DNS watcher for supplied hostname. 
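A minimal usage sketch (not from the vendored code) of the ForEachUser helper added above: it fans the given user IDs out to a fixed number of workers and collects per-user errors into a single multi-error; the tenant names and worker count here are arbitrary.

package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/concurrency"
)

func main() {
	users := []string{"tenant-a", "tenant-b", "tenant-c"}

	// Process up to 2 users at a time; a failing user does not stop the others.
	err := concurrency.ForEachUser(context.Background(), users, 2, func(ctx context.Context, userID string) error {
		fmt.Println("processing", userID)
		return nil
	})
	if err != nil {
		fmt.Println("some users failed:", err)
	}
}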
+ AddressAdded(address string) + + // Previously-discovered address is no longer resolved for the hostname. + AddressRemoved(address string) +} + +type dnsWatcher struct { + watcher naming.Watcher //nolint:staticcheck //Skipping for now. If you still see this more than likely issue https://github.com/cortexproject/cortex/issues/2015 has not yet been addressed. + notifications DNSNotifications +} + +// NewDNSWatcher creates a new DNS watcher and returns a service that is wrapping it. +func NewDNSWatcher(address string, dnsLookupPeriod time.Duration, notifications DNSNotifications) (services.Service, error) { + resolver, err := naming.NewDNSResolverWithFreq(dnsLookupPeriod) + if err != nil { + return nil, err + } + + watcher, err := resolver.Resolve(address) + if err != nil { + return nil, err + } + + w := &dnsWatcher{ + watcher: watcher, + notifications: notifications, + } + return services.NewBasicService(nil, w.watchDNSLoop, nil), nil +} + +// watchDNSLoop watches for changes in DNS and sends notifications. +func (w *dnsWatcher) watchDNSLoop(servCtx context.Context) error { + go func() { + // Close the watcher, when this service is asked to stop. + // Closing the watcher makes watchDNSLoop exit, since it only iterates on watcher updates, and has no other + // way to stop. We cannot close the watcher in `stopping` method, because it is only called *after* + // watchDNSLoop exits. + <-servCtx.Done() + w.watcher.Close() + }() + + for { + updates, err := w.watcher.Next() + if err != nil { + // watcher.Next returns error when Close is called, but we call Close when our context is done. + // we don't want to report error in that case. + if servCtx.Err() != nil { + return nil + } + return errors.Wrapf(err, "error from DNS watcher") + } + + for _, update := range updates { + switch update.Op { + case naming.Add: + w.notifications.AddressAdded(update.Addr) + + case naming.Delete: + w.notifications.AddressRemoved(update.Addr) + + default: + return fmt.Errorf("unknown op: %v", update.Op) + } + } + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/errors.go b/vendor/github.com/cortexproject/cortex/pkg/util/errors.go index c372e819466b2..0299ad25f7af3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/errors.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/errors.go @@ -1,6 +1,8 @@ package util -import "errors" +import ( + "errors" +) // ErrStopProcess is the error returned by a service as a hint to stop the server entirely. var ErrStopProcess = errors.New("stop process") diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go b/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go index 42557e15240df..ee850e8045166 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go @@ -15,19 +15,27 @@ import ( // SetupAuthMiddleware for the given server config. 
func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []string) middleware.Interface { if enabled { - config.GRPCMiddleware = append(config.GRPCMiddleware, - middleware.ServerUserHeaderInterceptor, - ) + ignoredMethods := map[string]bool{} + for _, m := range noGRPCAuthOn { + ignoredMethods[m] = true + } + + config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if ignoredMethods[info.FullMethod] { + return handler(ctx, req) + } + return middleware.ServerUserHeaderInterceptor(ctx, req, info, handler) + }) + config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - for _, path := range noGRPCAuthOn { - if info.FullMethod == path { - return handler(srv, ss) - } + if ignoredMethods[info.FullMethod] { + return handler(srv, ss) } return middleware.StreamServerUserHeaderInterceptor(srv, ss, info, handler) }, ) + return middleware.AuthenticateUser } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go index 8a73616946b52..522011fb01c6a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go @@ -2,6 +2,7 @@ package grpcclient import ( "flag" + "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -9,6 +10,7 @@ import ( "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/keepalive" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -90,6 +92,11 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep grpc.WithDefaultCallOptions(cfg.CallOptions()...), grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryClientInterceptors...)), grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(streamClientInterceptors...)), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Second * 20, + Timeout: time.Second * 10, + PermitWithoutStream: true, + }), } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/carrier.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/carrier.go new file mode 100644 index 0000000000000..1c6dee7ff5a22 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/carrier.go @@ -0,0 +1,40 @@ +package grpcutil + +import ( + "github.com/opentracing/opentracing-go" + "github.com/weaveworks/common/httpgrpc" +) + +// Used to transfer trace information from/to HTTP request. 
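The grpcclient change above hard-wires client-side keepalives into every Cortex gRPC client. As a hedged standalone sketch, the equivalent dial options with the same values look like this; the plaintext grpc.WithInsecure option is an assumption made for the example only.

package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func dial(addr string) (*grpc.ClientConn, error) {
	// Same keepalive settings the vendored grpcclient.Config.DialOption now applies:
	// ping every 20s even without active streams, and give up after 10s without a reply.
	return grpc.Dial(addr,
		grpc.WithInsecure(), // plaintext transport, assumed for this sketch
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                20 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}),
	)
}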
+type HttpgrpcHeadersCarrier httpgrpc.HTTPRequest + +func (c *HttpgrpcHeadersCarrier) Set(key, val string) { + c.Headers = append(c.Headers, &httpgrpc.Header{ + Key: key, + Values: []string{val}, + }) +} + +func (c *HttpgrpcHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for _, h := range c.Headers { + for _, v := range h.Values { + if err := handler(h.Key, v); err != nil { + return err + } + } + } + return nil +} + +func GetParentSpanForRequest(tracer opentracing.Tracer, req *httpgrpc.HTTPRequest) (opentracing.SpanContext, error) { + if tracer == nil { + return nil, nil + } + + carrier := (*HttpgrpcHeadersCarrier)(req) + extracted, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + if err == opentracing.ErrSpanContextNotFound { + err = nil + } + return extracted, err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go index a34e02eea40dd..f04e8065fe951 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log.go @@ -11,7 +11,8 @@ import ( "github.com/weaveworks/common/logging" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" - "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/tenant" ) var ( @@ -105,7 +106,7 @@ func (pl *PrometheusLogger) Log(kv ...interface{}) error { func WithContext(ctx context.Context, l log.Logger) log.Logger { // Weaveworks uses "orgs" and "orgID" to represent Cortex users, // even though the code-base generally uses `userID` to refer to the same thing. - userID, err := user.ExtractOrgID(ctx) + userID, err := tenant.TenantID(ctx) if err == nil { l = WithUserID(userID, l) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go index 20c6119675302..172bfddfcf157 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go @@ -120,32 +120,15 @@ func (mfm MetricFamilyMap) sumOfSingleValuesWithLabels(metric string, labelNames // MetricFamiliesPerUser is a collection of metrics gathered via calling Gatherer.Gather() method on different // gatherers, one per user. 
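A hypothetical sketch of one trace-propagation round trip with the new HttpgrpcHeadersCarrier: the sender injects the current span context into the httpgrpc request headers, and the receiver recovers it with GetParentSpanForRequest. The span operation name is made up for the example.

package example

import (
	"github.com/opentracing/opentracing-go"
	"github.com/weaveworks/common/httpgrpc"

	"github.com/cortexproject/cortex/pkg/util/grpcutil"
)

// propagate injects a span context into req.Headers, then extracts it again
// the way a receiving component would.
func propagate(tracer opentracing.Tracer, req *httpgrpc.HTTPRequest) (opentracing.SpanContext, error) {
	span := tracer.StartSpan("frontend.enqueue") // hypothetical operation name
	defer span.Finish()

	// Sender side: headers are appended to req.Headers via the carrier.
	carrier := (*grpcutil.HttpgrpcHeadersCarrier)(req)
	if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
		return nil, err
	}

	// Receiver side: recover the parent span context (nil if none was sent).
	return grpcutil.GetParentSpanForRequest(tracer, req)
}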
-type MetricFamiliesPerUser map[string]MetricFamilyMap - -func BuildMetricFamiliesPerUserFromUserRegistries(regs map[string]*prometheus.Registry) MetricFamiliesPerUser { - data := MetricFamiliesPerUser{} - for userID, r := range regs { - m, err := r.Gather() - if err == nil { - var mfm MetricFamilyMap // := would shadow err from outer block, and single err check will not work - mfm, err = NewMetricFamilyMap(m) - if err == nil { - data[userID] = mfm - } - } - - if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", userID, "err", err) - continue - } - } - return data +type MetricFamiliesPerUser []struct { + user string + metrics MetricFamilyMap } func (d MetricFamiliesPerUser) GetSumOfCounters(counter string) float64 { result := float64(0) - for _, userMetrics := range d { - result += userMetrics.SumCounters(counter) + for _, userEntry := range d { + result += userEntry.metrics.SumCounters(counter) } return result } @@ -159,28 +142,28 @@ func (d MetricFamiliesPerUser) SendSumOfCountersWithLabels(out chan<- prometheus } func (d MetricFamiliesPerUser) SendSumOfCountersPerUser(out chan<- prometheus.Metric, desc *prometheus.Desc, counter string) { - for user, userMetrics := range d { - v := userMetrics.SumCounters(counter) - - out <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v, user) - } + d.SendSumOfCountersPerUserWithLabels(out, desc, counter) } // SendSumOfCountersPerUserWithLabels provides metrics with the provided label names on a per-user basis. This function assumes that `user` is the // first label on the provided metric Desc func (d MetricFamiliesPerUser) SendSumOfCountersPerUserWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, metric string, labelNames ...string) { - for user, userMetrics := range d { + for _, userEntry := range d { + if userEntry.user == "" { + continue + } + result := singleValueWithLabelsMap{} - userMetrics.sumOfSingleValuesWithLabels(metric, labelNames, counterValue, result.aggregateFn) - result.prependUserLabelValue(user) + userEntry.metrics.sumOfSingleValuesWithLabels(metric, labelNames, counterValue, result.aggregateFn) + result.prependUserLabelValue(userEntry.user) result.WriteToMetricChannel(out, desc, prometheus.CounterValue) } } func (d MetricFamiliesPerUser) GetSumOfGauges(gauge string) float64 { result := float64(0) - for _, userMetrics := range d { - result += userMetrics.SumGauges(gauge) + for _, userEntry := range d { + result += userEntry.metrics.SumGauges(gauge) } return result } @@ -193,29 +176,37 @@ func (d MetricFamiliesPerUser) SendSumOfGaugesWithLabels(out chan<- prometheus.M d.sumOfSingleValuesWithLabels(gauge, gaugeValue, labelNames).WriteToMetricChannel(out, desc, prometheus.GaugeValue) } +func (d MetricFamiliesPerUser) SendSumOfGaugesPerUser(out chan<- prometheus.Metric, desc *prometheus.Desc, gauge string) { + d.SendSumOfGaugesPerUserWithLabels(out, desc, gauge) +} + // SendSumOfGaugesPerUserWithLabels provides metrics with the provided label names on a per-user basis. 
This function assumes that `user` is the // first label on the provided metric Desc func (d MetricFamiliesPerUser) SendSumOfGaugesPerUserWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, metric string, labelNames ...string) { - for user, userMetrics := range d { + for _, userEntry := range d { + if userEntry.user == "" { + continue + } + result := singleValueWithLabelsMap{} - userMetrics.sumOfSingleValuesWithLabels(metric, labelNames, gaugeValue, result.aggregateFn) - result.prependUserLabelValue(user) + userEntry.metrics.sumOfSingleValuesWithLabels(metric, labelNames, gaugeValue, result.aggregateFn) + result.prependUserLabelValue(userEntry.user) result.WriteToMetricChannel(out, desc, prometheus.GaugeValue) } } func (d MetricFamiliesPerUser) sumOfSingleValuesWithLabels(metric string, fn func(*dto.Metric) float64, labelNames []string) singleValueWithLabelsMap { result := singleValueWithLabelsMap{} - for _, userMetrics := range d { - userMetrics.sumOfSingleValuesWithLabels(metric, labelNames, fn, result.aggregateFn) + for _, userEntry := range d { + userEntry.metrics.sumOfSingleValuesWithLabels(metric, labelNames, fn, result.aggregateFn) } return result } func (d MetricFamiliesPerUser) SendMaxOfGauges(out chan<- prometheus.Metric, desc *prometheus.Desc, gauge string) { result := math.NaN() - for _, userMetrics := range d { - if value := userMetrics.MaxGauges(gauge); math.IsNaN(result) || value > result { + for _, userEntry := range d { + if value := userEntry.metrics.MaxGauges(gauge); math.IsNaN(result) || value > result { result = value } } @@ -228,10 +219,21 @@ func (d MetricFamiliesPerUser) SendMaxOfGauges(out chan<- prometheus.Metric, des out <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, result) } +func (d MetricFamiliesPerUser) SendMaxOfGaugesPerUser(out chan<- prometheus.Metric, desc *prometheus.Desc, gauge string) { + for _, userEntry := range d { + if userEntry.user == "" { + continue + } + + result := userEntry.metrics.MaxGauges(gauge) + out <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, result, userEntry.user) + } +} + func (d MetricFamiliesPerUser) SendSumOfSummaries(out chan<- prometheus.Metric, desc *prometheus.Desc, summaryName string) { summaryData := SummaryData{} - for _, userMetrics := range d { - userMetrics.SumSummariesTo(summaryName, &summaryData) + for _, userEntry := range d { + userEntry.metrics.SumSummariesTo(summaryName, &summaryData) } out <- summaryData.Metric(desc) } @@ -244,8 +246,8 @@ func (d MetricFamiliesPerUser) SendSumOfSummariesWithLabels(out chan<- prometheu result := map[string]summaryResult{} - for _, userMetrics := range d { - metricsPerLabelValue := getMetricsWithLabelNames(userMetrics[summaryName], labelNames) + for _, mfm := range d { + metricsPerLabelValue := getMetricsWithLabelNames(mfm.metrics[summaryName], labelNames) for key, mwl := range metricsPerLabelValue { for _, m := range mwl.metrics { @@ -266,16 +268,20 @@ func (d MetricFamiliesPerUser) SendSumOfSummariesWithLabels(out chan<- prometheu } func (d MetricFamiliesPerUser) SendSumOfSummariesPerUser(out chan<- prometheus.Metric, desc *prometheus.Desc, summaryName string) { - for user, userMetrics := range d { - data := userMetrics.SumSummaries(summaryName) - out <- data.Metric(desc, user) + for _, userEntry := range d { + if userEntry.user == "" { + continue + } + + data := userEntry.metrics.SumSummaries(summaryName) + out <- data.Metric(desc, userEntry.user) } } func (d MetricFamiliesPerUser) SendSumOfHistograms(out chan<- prometheus.Metric, desc 
*prometheus.Desc, histogramName string) { hd := HistogramData{} - for _, userMetrics := range d { - userMetrics.SumHistogramsTo(histogramName, &hd) + for _, userEntry := range d { + userEntry.metrics.SumHistogramsTo(histogramName, &hd) } out <- hd.Metric(desc) } @@ -288,8 +294,8 @@ func (d MetricFamiliesPerUser) SendSumOfHistogramsWithLabels(out chan<- promethe result := map[string]histogramResult{} - for _, userMetrics := range d { - metricsPerLabelValue := getMetricsWithLabelNames(userMetrics[histogramName], labelNames) + for _, mfm := range d { + metricsPerLabelValue := getMetricsWithLabelNames(mfm.metrics[histogramName], labelNames) for key, mwl := range metricsPerLabelValue { for _, m := range mwl.metrics { @@ -498,3 +504,160 @@ func (h *HistogramDataCollector) Add(hd HistogramData) { h.data.AddHistogramData(hd) } + +// UserRegistry holds a Prometheus registry associated to a specific user. +type UserRegistry struct { + user string // Set to "" when registry is soft-removed. + reg *prometheus.Registry // Set to nil, when registry is soft-removed. + + // Set to last result of Gather() call when removing registry. + lastGather MetricFamilyMap +} + +// UserRegistries holds Prometheus registries for multiple users, guaranteeing +// multi-thread safety and stable ordering. +type UserRegistries struct { + regsMu sync.Mutex + regs []UserRegistry +} + +// NewUserRegistries makes new UserRegistries. +func NewUserRegistries() *UserRegistries { + return &UserRegistries{} +} + +// AddUserRegistry adds an user registry. If user already has a registry, +// previous registry is removed, but latest metric values are preserved +// in order to avoid counter resets. +func (r *UserRegistries) AddUserRegistry(user string, reg *prometheus.Registry) { + r.regsMu.Lock() + defer r.regsMu.Unlock() + + // Soft-remove user registry, if user has one already. + for idx := 0; idx < len(r.regs); { + if r.regs[idx].user != user { + idx++ + continue + } + + if r.softRemoveUserRegistry(&r.regs[idx]) { + // Keep it. + idx++ + } else { + // Remove it. + r.regs = append(r.regs[:idx], r.regs[idx+1:]...) + } + } + + // New registries must be added to the end of the list, to guarantee stability. + r.regs = append(r.regs, UserRegistry{ + user: user, + reg: reg, + }) +} + +// RemoveUserRegistry removes all Prometheus registries for a given user. +// If hard is true, registry is removed completely. +// If hard is false, latest registry values are preserved for future aggregations. +func (r *UserRegistries) RemoveUserRegistry(user string, hard bool) { + r.regsMu.Lock() + defer r.regsMu.Unlock() + + for idx := 0; idx < len(r.regs); { + if user != r.regs[idx].user { + idx++ + continue + } + + if !hard && r.softRemoveUserRegistry(&r.regs[idx]) { + idx++ // keep it + } else { + r.regs = append(r.regs[:idx], r.regs[idx+1:]...) // remove it. + } + } +} + +// Returns true, if we should keep latest metrics. Returns false if we failed to gather latest metrics, +// and this can be removed completely. +func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool { + last, err := ur.reg.Gather() + if err != nil { + level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) + return false + } + + for ix := 0; ix < len(last); { + // Only keep metrics for which we don't want to go down, since that indicates reset (counter, summary, histogram). 
+ switch last[ix].GetType() { + case dto.MetricType_COUNTER, dto.MetricType_SUMMARY, dto.MetricType_HISTOGRAM: + ix++ + default: + // Remove gauges and unknowns. + last = append(last[:ix], last[ix+1:]...) + } + } + + // No metrics left. + if len(last) == 0 { + return false + } + + ur.lastGather, err = NewMetricFamilyMap(last) + if err != nil { + level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) + return false + } + + ur.user = "" + ur.reg = nil + return true +} + +// Registries returns a copy of the user registries list. +func (r *UserRegistries) Registries() []UserRegistry { + r.regsMu.Lock() + defer r.regsMu.Unlock() + + out := make([]UserRegistry, 0, len(r.regs)) + out = append(out, r.regs...) + + return out +} + +func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser { + data := MetricFamiliesPerUser{} + for _, entry := range r.Registries() { + // Set for removed users. + if entry.reg == nil { + if entry.lastGather != nil { + data = append(data, struct { + user string + metrics MetricFamilyMap + }{user: "", metrics: entry.lastGather}) + } + + continue + } + + m, err := entry.reg.Gather() + if err == nil { + var mfm MetricFamilyMap // := would shadow err from outer block, and single err check will not work + mfm, err = NewMetricFamilyMap(m) + if err == nil { + data = append(data, struct { + user string + metrics MetricFamilyMap + }{ + user: entry.user, + metrics: mfm, + }) + } + } + + if err != nil { + level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err) + continue + } + } + return data +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go b/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go index 5a2b49d30e3de..e9a28b046434f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/modules/modules.go @@ -108,7 +108,7 @@ func (m *Manager) initModule(name string, initMap map[string]bool, servicesMap m if s != nil { // We pass servicesMap, which isn't yet complete. By the time service starts, // it will be fully built, so there is no need for extra synchronization. - serv = newModuleServiceWrapper(servicesMap, n, s, mod.deps, m.findInverseDependencies(n, deps[ix+1:])) + serv = newModuleServiceWrapper(servicesMap, n, s, m.DependenciesForModule(n), m.findInverseDependencies(n, deps[ix+1:])) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go b/vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go new file mode 100644 index 0000000000000..e3a863ad26972 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/process/collector.go @@ -0,0 +1,132 @@ +package process + +import ( + "bufio" + "bytes" + "errors" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" +) + +var ( + ErrUnsupportedCollector = errors.New("unsupported platform") +) + +type processCollector struct { + pid int + procMountPoint string + + // Metrics. + currMaps *prometheus.Desc + maxMaps *prometheus.Desc +} + +// NewProcessCollector makes a new custom process collector used to collect process metrics the +// default instrumentation doesn't support. 
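An illustrative sketch (not from the vendored code) of the UserRegistries aggregation pattern the store-gateway metrics now rely on: one registry per tenant, aggregated on Collect, with soft removal preserving the last counter values so aggregates do not reset. The metric name matches the Thanos counter aggregated above; the tenant names are arbitrary.

package example

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util"
)

func demo() {
	regs := util.NewUserRegistries()

	// One registry (and one counter) per tenant.
	for _, user := range []string{"tenant-a", "tenant-b"} {
		reg := prometheus.NewRegistry()
		c := prometheus.NewCounter(prometheus.CounterOpts{
			Name: "thanos_bucket_store_block_loads_total",
			Help: "demo counter",
		})
		reg.MustRegister(c)
		c.Inc()
		regs.AddUserRegistry(user, reg)
	}

	// Aggregate across tenants: 2 block loads in total.
	data := regs.BuildMetricFamiliesPerUser()
	fmt.Println(data.GetSumOfCounters("thanos_bucket_store_block_loads_total")) // 2

	// Soft removal keeps the last gathered counter values, so the aggregate
	// stays at 2 even after the tenant's registry is gone.
	regs.RemoveUserRegistry("tenant-a", false)
	data = regs.BuildMetricFamiliesPerUser()
	fmt.Println(data.GetSumOfCounters("thanos_bucket_store_block_loads_total")) // still 2
}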
+func NewProcessCollector() (prometheus.Collector, error) { + return newProcessCollector(os.Getpid(), DefaultProcMountPoint) +} + +func newProcessCollector(pid int, procMountPoint string) (prometheus.Collector, error) { + // Check whether it's supported on this platform. + if !isSupported(procMountPoint) { + return nil, ErrUnsupportedCollector + } + + return &processCollector{ + pid: pid, + procMountPoint: procMountPoint, + currMaps: prometheus.NewDesc( + "process_memory_map_areas", + "Number of memory map areas allocated by the process.", + nil, nil, + ), + maxMaps: prometheus.NewDesc( + "process_memory_map_areas_limit", + "Maximum number of memory map ares the process can allocate.", + nil, nil, + ), + }, nil +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.currMaps + ch <- c.maxMaps +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- prometheus.Metric) { + if value, err := c.getMapsCount(); err == nil { + ch <- prometheus.MustNewConstMetric(c.currMaps, prometheus.GaugeValue, value) + } + + if value, err := c.getMapsCountLimit(); err == nil { + ch <- prometheus.MustNewConstMetric(c.maxMaps, prometheus.GaugeValue, value) + } +} + +// getMapsCount returns the number of memory map ares the process has allocated. +func (c *processCollector) getMapsCount() (float64, error) { + file, err := os.Open(processMapsPath(c.procMountPoint, c.pid)) + if err != nil { + return 0, err + } + defer file.Close() + + count := 0 + scan := bufio.NewScanner(file) + for scan.Scan() { + count++ + } + + return float64(count), scan.Err() +} + +// getMapsCountLimit returns the maximum of memory map ares the process can allocate. +func (c *processCollector) getMapsCountLimit() (float64, error) { + file, err := os.Open(vmMapsLimitPath(c.procMountPoint)) + if err != nil { + return 0, err + } + defer file.Close() + + content, err := ioutil.ReadAll(file) + if err != nil { + return 0, err + } + + content = bytes.TrimSpace(content) + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + value, err := strconv.ParseInt(string(content), base, 64) + if err != nil { + return 0, err + } + + return float64(value), nil +} + +func isSupported(procPath string) bool { + _, err := os.Stat(vmMapsLimitPath(procPath)) + return err == nil +} + +func processMapsPath(procPath string, pid int) string { + return filepath.Join(procPath, strconv.Itoa(pid), "maps") +} + +func vmMapsLimitPath(procPath string) string { + return filepath.Join(procPath, "sys", "vm", "max_map_count") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go index b376d2585e5e8..056e59ae271b9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go @@ -12,37 +12,63 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) +type loggerCtxMarker struct{} + +var ( + loggerCtxKey = &loggerCtxMarker{} +) + // SpanLogger unifies tracing and logging, to reduce repetition. type SpanLogger struct { log.Logger opentracing.Span } -// New makes a new SpanLogger. +// New makes a new SpanLogger, where logs will be sent to the global logger. 
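A short, hedged sketch of wiring the new process collector into a Prometheus registry; on platforms without /proc/sys/vm/max_map_count it returns ErrUnsupportedCollector, which callers can treat as non-fatal.

package example

import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util/process"
)

func registerProcessMetrics(reg prometheus.Registerer) error {
	c, err := process.NewProcessCollector()
	if errors.Is(err, process.ErrUnsupportedCollector) {
		// e.g. macOS or a container without /proc mounted: skip quietly.
		return nil
	}
	if err != nil {
		return err
	}
	reg.MustRegister(c)
	return nil
}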
func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) { + return NewWithLogger(ctx, util.Logger, method, kvps...) +} + +// NewWithLogger makes a new SpanLogger with a custom log.Logger to send logs +// to. The provided context will have the logger attached to it and can be +// retrieved with FromContext or FromContextWithFallback. +func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) { span, ctx := opentracing.StartSpanFromContext(ctx, method) logger := &SpanLogger{ - Logger: log.With(util.WithContext(ctx, util.Logger), "method", method), + Logger: log.With(util.WithContext(ctx, l), "method", method), Span: span, } if len(kvps) > 0 { level.Debug(logger).Log(kvps...) } + + ctx = context.WithValue(ctx, loggerCtxKey, l) return logger, ctx } -// FromContext returns a span logger using the current parent span. -// If there is no parent span, the Spanlogger will only log to stdout. +// FromContext returns a span logger using the current parent span. If there +// is no parent span, the SpanLogger will only log to the logger +// in the context. If the context doesn't have a logger, the global logger +// is used. func FromContext(ctx context.Context) *SpanLogger { + return FromContextWithFallback(ctx, util.Logger) +} + +// FromContextWithFallback returns a span logger using the current parent span. +// IF there is no parent span, the SpanLogger will only log to the logger +// within the context. If the context doesn't have a logger, the fallback +// logger is used. +func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogger { + logger, ok := ctx.Value(loggerCtxKey).(log.Logger) + if !ok { + logger = fallback + } sp := opentracing.SpanFromContext(ctx) if sp == nil { - return &SpanLogger{ - Logger: util.WithContext(ctx, util.Logger), - Span: defaultNoopSpan, - } + sp = defaultNoopSpan } return &SpanLogger{ - Logger: util.WithContext(ctx, util.Logger), + Logger: util.WithContext(ctx, logger), Span: sp, } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go b/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go index 3429a8525865f..fdd33264c9478 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/test/poll.go @@ -21,6 +21,6 @@ func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface } h := have() if !reflect.DeepEqual(want, h) { - t.Fatalf("%v != %v", want, h) + t.Fatalf("expected %v, got %v", want, h) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/time.go b/vendor/github.com/cortexproject/cortex/pkg/util/time.go index 45a4624565df5..7b55613e61ce4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/time.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/time.go @@ -7,6 +7,7 @@ import ( "strconv" "time" + "github.com/prometheus/common/model" "github.com/weaveworks/common/httpgrpc" ) @@ -23,6 +24,16 @@ func TimeFromMillis(ms int64) time.Time { return time.Unix(0, ms*nanosecondsInMillisecond) } +// FormatTimeMillis returns a human readable version of the input time (in milliseconds). +func FormatTimeMillis(ms int64) string { + return TimeFromMillis(ms).String() +} + +// FormatTimeModel returns a human readable version of the input time. +func FormatTimeModel(t model.Time) string { + return TimeFromMillis(int64(t)).String() +} + // ParseTime parses the string into an int64, milliseconds since epoch. 
func ParseTime(s string) (int64, error) { if t, err := strconv.ParseFloat(s, 64); err == nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index eb4e0ddafdbb9..1a99f9d8f16de 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -5,6 +5,8 @@ import ( "flag" "time" + "github.com/prometheus/prometheus/pkg/relabel" + "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -46,6 +48,7 @@ type Limits struct { EnforceMetadataMetricName bool `yaml:"enforce_metadata_metric_name"` EnforceMetricName bool `yaml:"enforce_metric_name"` IngestionTenantShardSize int `yaml:"ingestion_tenant_shard_size"` + MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty" doc:"nocli|description=List of metric relabel configurations. Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. remote_write.write_relabel_configs."` // Ingester enforced limits. // Series @@ -64,6 +67,7 @@ type Limits struct { // Querier enforced limits. MaxChunksPerQuery int `yaml:"max_chunks_per_query"` + MaxQueryLookback time.Duration `yaml:"max_query_lookback"` MaxQueryLength time.Duration `yaml:"max_query_length"` MaxQueryParallelism int `yaml:"max_query_parallelism"` CardinalityLimit int `yaml:"cardinality_limit"` @@ -119,10 +123,11 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage. When running the Cortex chunks storage, this limit is enforced in the querier, while when running the Cortex blocks storage this limit is both enforced in the querier and store-gateway. 0 to disable.") f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.") - f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.") + f.DurationVar(&l.MaxQueryLookback, "querier.max-query-lookback", 0, "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.") + f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of split queries will be scheduled in parallel by the frontend.") f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage. 0 to disable.") f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") - f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. 
If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends). This option only works with queriers connecting to the query-frontend, not when using downstream URL.") + f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.") f.DurationVar(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", 0, "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.") f.IntVar(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.") @@ -305,12 +310,18 @@ func (o *Overrides) MaxChunksPerQuery(userID string) int { return o.getOverridesForUser(userID).MaxChunksPerQuery } +// MaxQueryLookback returns the max lookback period of queries. +func (o *Overrides) MaxQueryLookback(userID string) time.Duration { + return o.getOverridesForUser(userID).MaxQueryLookback +} + // MaxQueryLength returns the limit of the length (in time) of a query. func (o *Overrides) MaxQueryLength(userID string) time.Duration { return o.getOverridesForUser(userID).MaxQueryLength } -// MaxCacheFreshness returns the limit of the length (in time) of a query. +// MaxCacheFreshness returns the period after which results are cacheable, +// to prevent caching of very recent results. func (o *Overrides) MaxCacheFreshness(userID string) time.Duration { return o.getOverridesForUser(userID).MaxCacheFreshness } @@ -320,7 +331,7 @@ func (o *Overrides) MaxQueriersPerUser(userID string) int { return o.getOverridesForUser(userID).MaxQueriersPerTenant } -// MaxQueryParallelism returns the limit to the number of sub-queries the +// MaxQueryParallelism returns the limit to the number of split queries the // frontend will process in parallel. func (o *Overrides) MaxQueryParallelism(userID string) int { return o.getOverridesForUser(userID).MaxQueryParallelism @@ -376,6 +387,11 @@ func (o *Overrides) EvaluationDelay(userID string) time.Duration { return o.getOverridesForUser(userID).RulerEvaluationDelay } +// MetricRelabelConfigs returns the metric relabel configs for a given user. +func (o *Overrides) MetricRelabelConfigs(userID string) []*relabel.Config { + return o.getOverridesForUser(userID).MetricRelabelConfigs +} + // RulerTenantShardSize returns shard size (number of rulers) used by this tenant when using shuffle-sharding strategy. 
func (o *Overrides) RulerTenantShardSize(userID string) int { return o.getOverridesForUser(userID).RulerTenantShardSize diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index 1cbd32160e767..3ae4f54b4f884 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -179,7 +179,7 @@ type MetadataValidationConfig interface { // ValidateMetadata returns an err if a metric metadata is invalid. func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *client.MetricMetadata) error { - if cfg.EnforceMetadataMetricName(userID) && metadata.MetricName == "" { + if cfg.EnforceMetadataMetricName(userID) && metadata.GetMetricFamilyName() == "" { DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc() return httpgrpc.Errorf(http.StatusBadRequest, errMetadataMissingMetricName) } @@ -188,10 +188,10 @@ func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *cli var reason string var cause string var metadataType string - if len(metadata.MetricName) > maxMetadataValueLength { + if len(metadata.GetMetricFamilyName()) > maxMetadataValueLength { metadataType = typeMetricName reason = metricNameTooLong - cause = metadata.MetricName + cause = metadata.GetMetricFamilyName() } else if len(metadata.Help) > maxMetadataValueLength { metadataType = typeHelp reason = helpTooLong @@ -204,7 +204,7 @@ func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *cli if reason != "" { DiscardedMetadata.WithLabelValues(reason, userID).Inc() - return httpgrpc.Errorf(http.StatusBadRequest, errMetadataTooLong, metadataType, cause, metadata.MetricName) + return httpgrpc.Errorf(http.StatusBadRequest, errMetadataTooLong, metadataType, cause, metadata.GetMetricFamilyName()) } return nil diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 2aa6ac5740272..24fdacbf9e138 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,43 @@ # Change Log +## [v1.52.0] - 2020-11-05 + +- #411 - @nicktate - apps: add unspecified type to image source registry types +- #409 - @andrewsomething - registry: Add support for updating a subscription. +- #408 - @nicktate - apps: update spec to include image source +- #407 - @kamaln7 - apps: add the option to force build a new deployment + +## [v1.51.0] - 2020-11-02 + +- #405 - @adamwg - registry: Support subscription options +- #398 - @reeseconor - Add support for caching dependencies between GitHub Action runs +- #404 - @andrewsomething - CONTRIBUTING.md: Suggest using github-changelog-generator. 
+ +## [v1.50.0] - 2020-10-26 + +- #400 - @waynr - registry: add garbage collection support +- #402 - @snormore - apps: add catchall_document static site spec field and failed-deploy job type +- #401 - @andrewlouis93 - VPC: adds option to set a VPC as the regional default + +## [v1.49.0] - 2020-10-21 + +- #383 - @kamaln7 - apps: add ListRegions, Get/ListTiers, Get/ListInstanceSizes +- #390 - @snormore - apps: add service spec internal_ports + +## [v1.48.0] - 2020-10-16 + +- #388 - @varshavaradarajan - kubernetes - change docr integration api routes +- #386 - @snormore - apps: pull in recent updates to jobs and domains + +## [v1.47.0] - 2020-10-14 + +- #384 kubernetes - add registry related doks apis - @varshavaradarajan +- #385 Fixed some typo in apps.gen.go and databases.go file - @devil-cyber +- #382 Add GetKubeConfigWithExpiry (#334) - @ivanlemeshev +- #381 Fix golint issues #377 - @sidsbrmnn +- #380 refactor: Cyclomatic complexity issue - @DonRenando +- #379 Run gofmt to fix some issues in codebase - @mycodeself + ## [v1.46.0] - 2020-10-05 - #373 load balancers: add LB size field, currently in closed beta - @anitgandhi diff --git a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md index d6f453baa7959..23bbe202cef24 100644 --- a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md +++ b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md @@ -29,7 +29,7 @@ version number. Any code merged to main is subject to release. ## Releasing -Releasing a new version of godo is currently a manual process. +Releasing a new version of godo is currently a manual process. Submit a separate pull request for the version change from the pull request with your changes. @@ -38,17 +38,32 @@ request with your changes. for the next (unreleased) version does not exist, create one. Include one bullet point for each piece of new functionality in the release, including the pull request ID, description, and author(s). + For example: ``` ## [v1.8.0] - 2019-03-13 -- #210 Expose tags on storage volume create/list/get. - @jcodybaker -- #123 Update test dependencies - @digitalocean +- #210 - @jcodybaker - Expose tags on storage volume create/list/get. +- #123 - @digitalocean - Update test dependencies +``` + + To generate a list of changes since the previous release in the correct + format, you can use [github-changelog-generator](https://github.com/digitalocean/github-changelog-generator). + It can be installed from source by running: + +``` +go get -u github.com/digitalocean/github-changelog-generator +``` + + Next, list the changes by running: + +``` +github-changelog-generator -org digitalocean -repo godo ``` 2. Update the `libraryVersion` number in `godo.go`. 3. Make a pull request with these changes. This PR should be separate from the PR containing the godo changes. -4. Once the pull request has been merged, [draft a new release](https://github.com/digitalocean/godo/releases/new). -5. Update the `Tag version` and `Release title` field with the new godo version. Be sure the version has a `v` prefixed in both places. Ex `v1.8.0`. +4. Once the pull request has been merged, [draft a new release](https://github.com/digitalocean/godo/releases/new). +5. Update the `Tag version` and `Release title` field with the new godo version. Be sure the version has a `v` prefixed in both places. Ex `v1.8.0`. 6. Copy the changelog bullet points to the description field. 7. Publish the release. 
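The godo changelog entries above, together with the vendored client changes in the files that follow, surface several new API calls (force-build app deployments, App Platform tiers and instance sizes, kubeconfigs with an expiry, and registry garbage collection). Below is a minimal consumer-side sketch of how those calls fit together, assuming a client built with `godo.NewFromToken` and placeholder token, app ID, cluster ID, and registry name; it is illustrative only, not part of the vendored patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder token and resource identifiers; substitute real values.
	client := godo.NewFromToken("dop_v1_example_token")
	ctx := context.Background()

	// Force-build a new deployment using the new optional request body.
	dep, _, err := client.Apps.CreateDeployment(ctx, "app-id",
		&godo.DeploymentCreateRequest{ForceBuild: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deployment phase:", dep.Phase)

	// List App Platform tiers via the newly added endpoint.
	tiers, _, err := client.Apps.ListTiers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tiers {
		fmt.Println("tier:", t.Slug)
	}

	// Fetch a kubeconfig that expires after one hour.
	cfg, _, err := client.Kubernetes.GetKubeConfigWithExpiry(ctx, "cluster-id", 3600)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("kubeconfig bytes:", len(cfg.KubeconfigYAML))

	// Kick off a container registry garbage collection run.
	gc, _, err := client.Registry.StartGarbageCollection(ctx, "my-registry")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("garbage collection status:", gc.Status)
}
```

Each call returns the decoded resource plus the raw `*godo.Response`, matching the conventions of the existing godo service methods shown in the diffs below.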
diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index d39086938ac1d..9e618c771f928 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -20,6 +20,9 @@ type App struct { LastDeploymentCreatedAt time.Time `json:"last_deployment_created_at,omitempty"` LiveURL string `json:"live_url,omitempty"` Region *AppRegion `json:"region,omitempty"` + TierSlug string `json:"tier_slug,omitempty"` + LiveURLBase string `json:"live_url_base,omitempty"` + LiveDomain string `json:"live_domain,omitempty"` } // AppDatabaseSpec struct for AppDatabaseSpec @@ -58,6 +61,9 @@ type AppDomainSpec struct { // The hostname. Domain string `json:"domain"` Type AppDomainSpecType `json:"type,omitempty"` + // Whether the domain includes all sub-domains, in addition to the given domain. + Wildcard bool `json:"wildcard,omitempty"` + Zone string `json:"zone,omitempty"` } // AppDomainSpecType - DEFAULT: The default .ondigitalocean.app domain assigned to this app. - PRIMARY: The primary domain for this app. This is the domain that is displayed as the default in the control panel, used in bindable environment variables, and any other places that reference an app's live URL. Only one domain may be set as primary. - ALIAS: A non-primary domain. @@ -77,6 +83,7 @@ type AppJobSpec struct { Name string `json:"name"` Git *GitSourceSpec `json:"git,omitempty"` GitHub *GitHubSourceSpec `json:"github,omitempty"` + Image *ImageSourceSpec `json:"image,omitempty"` // The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks. DockerfilePath string `json:"dockerfile_path,omitempty"` // An optional build command to run while building this component from source. @@ -90,10 +97,22 @@ type AppJobSpec struct { // A list of environment variables made available to the component. Envs []*AppVariableDefinition `json:"envs,omitempty"` // The instance size to use for this component. - InstanceSizeSlug string `json:"instance_size_slug,omitempty"` - InstanceCount int64 `json:"instance_count,omitempty"` + InstanceSizeSlug string `json:"instance_size_slug,omitempty"` + InstanceCount int64 `json:"instance_count,omitempty"` + Kind AppJobSpecKind `json:"kind,omitempty"` } +// AppJobSpecKind - UNSPECIFIED: Default job type, will auto-complete to POST_DEPLOY kind. - PRE_DEPLOY: Indicates a job that runs before an app deployment. - POST_DEPLOY: Indicates a job that runs after an app deployment. - FAILED_DEPLOY: Indicates a job that runs after a component fails to deploy. +type AppJobSpecKind string + +// List of AppJobSpecKind +const ( + AppJobSpecKind_Unspecified AppJobSpecKind = "UNSPECIFIED" + AppJobSpecKind_PreDeploy AppJobSpecKind = "PRE_DEPLOY" + AppJobSpecKind_PostDeploy AppJobSpecKind = "POST_DEPLOY" + AppJobSpecKind_FailedDeploy AppJobSpecKind = "FAILED_DEPLOY" +) + // AppRouteSpec struct for AppRouteSpec type AppRouteSpec struct { // An HTTP path prefix. Paths must start with / and must be unique across all components within an app. @@ -106,6 +125,7 @@ type AppServiceSpec struct { Name string `json:"name"` Git *GitSourceSpec `json:"git,omitempty"` GitHub *GitHubSourceSpec `json:"github,omitempty"` + Image *ImageSourceSpec `json:"image,omitempty"` // The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. 
Otherwise, App Platform will attempt to build it using buildpacks. DockerfilePath string `json:"dockerfile_path,omitempty"` // An optional build command to run while building this component from source. @@ -117,15 +137,17 @@ type AppServiceSpec struct { // An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/). EnvironmentSlug string `json:"environment_slug,omitempty"` // A list of environment variables made available to the component. - Envs []*AppVariableDefinition `json:"envs,omitempty"` - // The instance size to use for this component. - InstanceSizeSlug string `json:"instance_size_slug,omitempty"` - InstanceCount int64 `json:"instance_count,omitempty"` + Envs []*AppVariableDefinition `json:"envs,omitempty"` + InstanceSizeSlug string `json:"instance_size_slug,omitempty"` + InstanceCount int64 `json:"instance_count,omitempty"` // The internal port on which this service's run command will listen. Default: 8080 If there is not an environment variable with the name `PORT`, one will be automatically added with its value set to the value of this field. HTTPPort int64 `json:"http_port,omitempty"` // A list of HTTP routes that should be routed to this component. Routes []*AppRouteSpec `json:"routes,omitempty"` HealthCheck *AppServiceSpecHealthCheck `json:"health_check,omitempty"` + CORS *AppCORSPolicy `json:"cors,omitempty"` + // The ports on which this service will listen for internal traffic. + InternalPorts []int64 `json:"internal_ports,omitempty"` } // AppServiceSpecHealthCheck struct for AppServiceSpecHealthCheck @@ -156,14 +178,13 @@ type AppSpec struct { StaticSites []*AppStaticSiteSpec `json:"static_sites,omitempty"` // Workloads which do not expose publicly-accessible HTTP services. Workers []*AppWorkerSpec `json:"workers,omitempty"` - // One-time or recurring workloads which do not expose publicly-accessible HTTP routes. + // Pre and post deployment workloads which do not expose publicly-accessible HTTP routes. Jobs []*AppJobSpec `json:"jobs,omitempty"` // Database instances which can provide persistence to workloads within the application. Databases []*AppDatabaseSpec `json:"databases,omitempty"` // A set of hostnames where the application will be available. Domains []*AppDomainSpec `json:"domains,omitempty"` - // The slug form of the geographical origin of the app. - Region string `json:"region,omitempty"` + Region string `json:"region,omitempty"` } // AppStaticSiteSpec struct for AppStaticSiteSpec @@ -189,14 +210,17 @@ type AppStaticSiteSpec struct { Envs []*AppVariableDefinition `json:"envs,omitempty"` // A list of HTTP routes that should be routed to this component. Routes []*AppRouteSpec `json:"routes,omitempty"` + CORS *AppCORSPolicy `json:"cors,omitempty"` + // The name of the document to use as the fallback for any requests to documents that are not found when serving this static site. Only 1 of `catchall_document` or `error_document` can be set. + CatchallDocument string `json:"catchall_document,omitempty"` } // AppVariableDefinition struct for AppVariableDefinition type AppVariableDefinition struct { // The name Key string `json:"key"` - // The value. If the type is SECRET, the value will be encrypted on first submission. On following submissions, the encrypted value must be used. - Value string `json:"value,omitempty"` + // The value. If the type is `SECRET`, the value will be encrypted on first submission. 
On following submissions, the encrypted value should be used. + Value string `json:"value,omitempty"` Scope AppVariableScope `json:"scope,omitempty"` Type AppVariableType `json:"type,omitempty"` } @@ -207,6 +231,7 @@ type AppWorkerSpec struct { Name string `json:"name"` Git *GitSourceSpec `json:"git,omitempty"` GitHub *GitHubSourceSpec `json:"github,omitempty"` + Image *ImageSourceSpec `json:"image,omitempty"` // The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks. DockerfilePath string `json:"dockerfile_path,omitempty"` // An optional build command to run while building this component from source. @@ -224,6 +249,12 @@ type AppWorkerSpec struct { InstanceCount int64 `json:"instance_count,omitempty"` } +// AppCORSPolicy struct for AppCORSPolicy +type AppCORSPolicy struct { + // The set of allowed CORS origins. + AllowOrigins []*AppStringMatch `json:"allow_origins,omitempty"` +} + // Deployment struct for Deployment type Deployment struct { ID string `json:"id,omitempty"` @@ -239,6 +270,7 @@ type Deployment struct { ClonedFrom string `json:"cloned_from,omitempty"` Progress *DeploymentProgress `json:"progress,omitempty"` Phase DeploymentPhase `json:"phase,omitempty"` + TierSlug string `json:"tier_slug,omitempty"` } // DeploymentJob struct for DeploymentJob @@ -265,11 +297,11 @@ const ( // DeploymentProgress struct for DeploymentProgress type DeploymentProgress struct { - PendingSteps int32 `json:"pending_steps,omitempty"` - RunningSteps int32 `json:"running_steps,omitempty"` - SuccessSteps int32 `json:"success_steps,omitempty"` - ErrorSteps int32 `json:"error_steps,omitempty"` - TotalSteps int32 `json:"total_steps,omitempty"` + PendingSteps int32 `json:"pending_steps,omitempty"` + RunningSteps int32 `json:"running_steps,omitempty"` + SuccessSteps int32 `json:"success_steps,omitempty"` + ErrorSteps int32 `json:"error_steps,omitempty"` + TotalSteps int32 `json:"total_steps,omitempty"` Steps []*DeploymentProgressStep `json:"steps,omitempty"` SummarySteps []*DeploymentProgressStep `json:"summary_steps,omitempty"` } @@ -305,16 +337,60 @@ type GitSourceSpec struct { Branch string `json:"branch,omitempty"` } +// ImageSourceSpec struct for ImageSourceSpec +type ImageSourceSpec struct { + RegistryType ImageSourceSpecRegistryType `json:"registry_type,omitempty"` + // The registry name. Must be left empty for the `DOCR` registry type. + Registry string `json:"registry,omitempty"` + // The repository name. + Repository string `json:"repository,omitempty"` + // The repository tag. Defaults to `latest` if not provided. + Tag string `json:"tag,omitempty"` +} + +// ImageSourceSpecRegistryType - UNSPECIFIED: Represents an unspecified registry type. - DOCR: The DigitalOcean container registry type. 
+type ImageSourceSpecRegistryType string + +// List of ImageSourceSpecRegistryType +const ( + ImageSourceSpecRegistryType_Unspecified ImageSourceSpecRegistryType = "UNSPECIFIED" + ImageSourceSpecRegistryType_DOCR ImageSourceSpecRegistryType = "DOCR" +) + +// AppInstanceSize struct for AppInstanceSize +type AppInstanceSize struct { + Name string `json:"name,omitempty"` + Slug string `json:"slug,omitempty"` + CPUType AppInstanceSizeCPUType `json:"cpu_type,omitempty"` + CPUs string `json:"cpus,omitempty"` + MemoryBytes string `json:"memory_bytes,omitempty"` + USDPerMonth string `json:"usd_per_month,omitempty"` + USDPerSecond string `json:"usd_per_second,omitempty"` + TierSlug string `json:"tier_slug,omitempty"` + TierUpgradeTo string `json:"tier_upgrade_to,omitempty"` + TierDowngradeTo string `json:"tier_downgrade_to,omitempty"` +} + +// AppInstanceSizeCPUType the model 'AppInstanceSizeCPUType' +type AppInstanceSizeCPUType string + +// List of AppInstanceSizeCPUType +const ( + AppInstanceSizeCPUType_Unspecified AppInstanceSizeCPUType = "UNSPECIFIED" + AppInstanceSizeCPUType_Shared AppInstanceSizeCPUType = "SHARED" + AppInstanceSizeCPUType_Dedicated AppInstanceSizeCPUType = "DEDICATED" +) + // DeploymentProgressStep struct for DeploymentProgressStep type DeploymentProgressStep struct { - Name string `json:"name,omitempty"` - Status DeploymentProgressStepStatus `json:"status,omitempty"` - Steps []*DeploymentProgressStep `json:"steps,omitempty"` - StartedAt time.Time `json:"started_at,omitempty"` - EndedAt time.Time `json:"ended_at,omitempty"` - Reason *DeploymentProgressStepReason `json:"reason,omitempty"` - ComponentName string `json:"component_name,omitempty"` - // The base of a human-readable description of the step intended to be combined with the component name for presentation. For example: message_base = \"Building service\" component_name = \"api\" + Name string `json:"name,omitempty"` + Status DeploymentProgressStepStatus `json:"status,omitempty"` + Steps []*DeploymentProgressStep `json:"steps,omitempty"` + StartedAt time.Time `json:"started_at,omitempty"` + EndedAt time.Time `json:"ended_at,omitempty"` + Reason *DeploymentProgressStepReason `json:"reason,omitempty"` + ComponentName string `json:"component_name,omitempty"` + // The base of a human-readable description of the step intended to be combined with the component name for presentation. For example: `message_base` = \"Building service\" `component_name` = \"api\" MessageBase string `json:"message_base,omitempty"` } @@ -339,6 +415,8 @@ type AppRegion struct { Disabled bool `json:"disabled,omitempty"` DataCenters []string `json:"data_centers,omitempty"` Reason string `json:"reason,omitempty"` + // Whether or not the region is presented as the default. + Default bool `json:"default,omitempty"` } // DeploymentProgressStepReason struct for DeploymentProgressStepReason @@ -347,6 +425,24 @@ type DeploymentProgressStepReason struct { Message string `json:"message,omitempty"` } +// AppStringMatch struct for AppStringMatch +type AppStringMatch struct { + // Exact string match. Only 1 of `exact`, `prefix`, or `regex` must be set. + Exact string `json:"exact,omitempty"` + // Prefix-based match. Only 1 of `exact`, `prefix`, or `regex` must be set. 
+ Prefix string `json:"prefix,omitempty"` + Regex string `json:"regex,omitempty"` +} + +// AppTier struct for AppTier +type AppTier struct { + Name string `json:"name,omitempty"` + Slug string `json:"slug,omitempty"` + StorageBytes string `json:"storage_bytes,omitempty"` + EgressBandwidthBytes string `json:"egress_bandwidth_bytes,omitempty"` + BuildSeconds string `json:"build_seconds,omitempty"` +} + // AppVariableScope the model 'AppVariableScope' type AppVariableScope string diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go index 28aa7b5b8508b..95c386b212036 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -33,9 +33,17 @@ type AppsService interface { GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) - CreateDeployment(ctx context.Context, appID string) (*Deployment, *Response, error) + CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool) (*AppLogs, *Response, error) + + ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) + + ListTiers(ctx context.Context) ([]*AppTier, *Response, error) + GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) + + ListInstanceSizes(ctx context.Context) ([]*AppInstanceSize, *Response, error) + GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, error) } // AppLogs represent app logs. @@ -54,6 +62,11 @@ type AppUpdateRequest struct { Spec *AppSpec `json:"spec"` } +// DeploymentCreateRequest represents a request to create a deployment. +type DeploymentCreateRequest struct { + ForceBuild bool `json:"force_build"` +} + type appRoot struct { App *App `json:"app"` } @@ -70,6 +83,26 @@ type deploymentsRoot struct { Deployments []*Deployment `json:"deployments"` } +type appTierRoot struct { + Tier *AppTier `json:"tier"` +} + +type appTiersRoot struct { + Tiers []*AppTier `json:"tiers"` +} + +type instanceSizeRoot struct { + InstanceSize *AppInstanceSize `json:"instance_size"` +} + +type instanceSizesRoot struct { + InstanceSizes []*AppInstanceSize `json:"instance_sizes"` +} + +type appRegionsRoot struct { + Regions []*AppRegion `json:"regions"` +} + // AppsServiceOp handles communication with Apps methods of the DigitalOcean API. type AppsServiceOp struct { client *Client @@ -182,9 +215,15 @@ func (s *AppsServiceOp) ListDeployments(ctx context.Context, appID string, opts } // CreateDeployment creates an app deployment. 
-func (s *AppsServiceOp) CreateDeployment(ctx context.Context, appID string) (*Deployment, *Response, error) { +func (s *AppsServiceOp) CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error) { path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID) - req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil) + + var createReq *DeploymentCreateRequest + for _, c := range create { + createReq = c + } + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createReq) if err != nil { return nil, nil, err } @@ -214,3 +253,78 @@ func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, compon } return logs, resp, nil } + +// ListRegions lists all regions supported by App Platform. +func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) { + path := fmt.Sprintf("%s/regions", appsBasePath) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(appRegionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Regions, resp, nil +} + +// ListTiers lists available app tiers. +func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, error) { + path := fmt.Sprintf("%s/tiers", appsBasePath) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(appTiersRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Tiers, resp, nil +} + +// GetTier retrieves information about a specific app tier. +func (s *AppsServiceOp) GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) { + path := fmt.Sprintf("%s/tiers/%s", appsBasePath, slug) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(appTierRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Tier, resp, nil +} + +// ListInstanceSizes lists available instance sizes for service, worker, and job components. +func (s *AppsServiceOp) ListInstanceSizes(ctx context.Context) ([]*AppInstanceSize, *Response, error) { + path := fmt.Sprintf("%s/tiers/instance_sizes", appsBasePath) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(instanceSizesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.InstanceSizes, resp, nil +} + +// GetInstanceSize retreives information about a specific instance size for service, worker, and job components. 
+func (s *AppsServiceOp) GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, error) { + path := fmt.Sprintf("%s/tiers/instance_sizes/%s", appsBasePath, slug) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(instanceSizeRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.InstanceSize, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 42d83ee0c4178..5d90d37d0ff89 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -82,7 +82,7 @@ const ( // The DatabasesService provides access to the DigitalOcean managed database // suite of products through the public API. Customers can create new database // clusters, migrate them between regions, create replicas and interact with -// their configurations. Each database service is refered to as a Database. A +// their configurations. Each database service is referred to as a Database. A // SQL database service can have multiple databases residing in the system. To // help make these entities distinct from Databases in godo, we refer to them // here as DatabaseDBs. @@ -269,7 +269,7 @@ type DatabaseCreateUserRequest struct { MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"` } -// DatabaseResetUserAuth request is used to reset a users DB auth +// DatabaseResetUserAuthRequest is used to reset a users DB auth type DatabaseResetUserAuthRequest struct { MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"` } @@ -537,6 +537,7 @@ func (svc *DatabasesServiceOp) CreateUser(ctx context.Context, databaseID string return root.User, resp, nil } +// ResetUserAuth will reset user authentication func (svc *DatabasesServiceOp) ResetUserAuth(ctx context.Context, databaseID, userID string, resetAuth *DatabaseResetUserAuthRequest) (*DatabaseUser, *Response, error) { path := fmt.Sprintf(databaseResetUserAuthPath, databaseID, userID) req, err := svc.client.NewRequest(ctx, http.MethodPost, path, resetAuth) diff --git a/vendor/github.com/digitalocean/godo/domains.go b/vendor/github.com/digitalocean/godo/domains.go index 6d7a6ae06923c..835509fc7a829 100644 --- a/vendor/github.com/digitalocean/godo/domains.go +++ b/vendor/github.com/digitalocean/godo/domains.go @@ -101,6 +101,7 @@ func (d Domain) String() string { return Stringify(d) } +// URN returns the domain name in a valid DO API URN form. func (d Domain) URN() string { return ToURN("Domain", d.Name) } diff --git a/vendor/github.com/digitalocean/godo/droplets.go b/vendor/github.com/digitalocean/godo/droplets.go index 72edf2b4382df..d08a905fb9282 100644 --- a/vendor/github.com/digitalocean/godo/droplets.go +++ b/vendor/github.com/digitalocean/godo/droplets.go @@ -126,6 +126,7 @@ func (d Droplet) String() string { return Stringify(d) } +// URN returns the droplet ID in a valid DO API URN form. func (d Droplet) URN() string { return ToURN("Droplet", d.ID) } diff --git a/vendor/github.com/digitalocean/godo/firewalls.go b/vendor/github.com/digitalocean/godo/firewalls.go index 8453e6645e056..0fb297b0542b0 100644 --- a/vendor/github.com/digitalocean/godo/firewalls.go +++ b/vendor/github.com/digitalocean/godo/firewalls.go @@ -49,6 +49,7 @@ func (fw Firewall) String() string { return Stringify(fw) } +// URN returns the firewall name in a valid DO API URN form. 
func (fw Firewall) URN() string { return ToURN("Firewall", fw.ID) } diff --git a/vendor/github.com/digitalocean/godo/floating_ips.go b/vendor/github.com/digitalocean/godo/floating_ips.go index 1720d767b4a42..2de812acfaf3e 100644 --- a/vendor/github.com/digitalocean/godo/floating_ips.go +++ b/vendor/github.com/digitalocean/godo/floating_ips.go @@ -37,6 +37,7 @@ func (f FloatingIP) String() string { return Stringify(f) } +// URN returns the floating IP in a valid DO API URN form. func (f FloatingIP) URN() string { return ToURN("FloatingIP", f.IP) } diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 6b9cc2f6b95a6..47bb339b1b089 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -19,7 +19,7 @@ import ( ) const ( - libraryVersion = "1.46.0" + libraryVersion = "1.52.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" diff --git a/vendor/github.com/digitalocean/godo/images.go b/vendor/github.com/digitalocean/godo/images.go index 64e72e75ebc6c..30882531ac9f1 100644 --- a/vendor/github.com/digitalocean/godo/images.go +++ b/vendor/github.com/digitalocean/godo/images.go @@ -132,6 +132,7 @@ func (s *ImagesServiceOp) GetBySlug(ctx context.Context, slug string) (*Image, * return s.get(ctx, interface{}(slug)) } +// Create a new image func (s *ImagesServiceOp) Create(ctx context.Context, createRequest *CustomImageCreateRequest) (*Image, *Response, error) { if createRequest == nil { return nil, nil, NewArgError("createRequest", "cannot be nil") diff --git a/vendor/github.com/digitalocean/godo/invoices.go b/vendor/github.com/digitalocean/godo/invoices.go index abc9d2de603ba..c8d7f40838ecd 100644 --- a/vendor/github.com/digitalocean/godo/invoices.go +++ b/vendor/github.com/digitalocean/godo/invoices.go @@ -171,7 +171,7 @@ func (s *InvoicesServiceOp) List(ctx context.Context, opt *ListOptions) (*Invoic return root, resp, err } -// Get a summary of metadata and summarized usage for an Invoice +// GetSummary returns a summary of metadata and summarized usage for an Invoice func (s *InvoicesServiceOp) GetSummary(ctx context.Context, invoiceUUID string) (*InvoiceSummary, *Response, error) { path := fmt.Sprintf("%s/%s/summary", invoicesBasePath, invoiceUUID) @@ -189,7 +189,7 @@ func (s *InvoicesServiceOp) GetSummary(ctx context.Context, invoiceUUID string) return root, resp, err } -// Get the pdf for an Invoice +// GetPDF returns the pdf for an Invoice func (s *InvoicesServiceOp) GetPDF(ctx context.Context, invoiceUUID string) ([]byte, *Response, error) { path := fmt.Sprintf("%s/%s/pdf", invoicesBasePath, invoiceUUID) @@ -207,7 +207,7 @@ func (s *InvoicesServiceOp) GetPDF(ctx context.Context, invoiceUUID string) ([]b return root.Bytes(), resp, err } -// Get the csv for an Invoice +// GetCSV returns the csv for an Invoice func (s *InvoicesServiceOp) GetCSV(ctx context.Context, invoiceUUID string) ([]byte, *Response, error) { path := fmt.Sprintf("%s/%s/csv", invoicesBasePath, invoiceUUID) diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index 4fc290e346136..12de4e04b4500 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -28,6 +28,7 @@ type KubernetesService interface { GetUser(context.Context, string) (*KubernetesClusterUser, *Response, error) GetUpgrades(context.Context, string) 
([]*KubernetesVersion, *Response, error) GetKubeConfig(context.Context, string) (*KubernetesClusterConfig, *Response, error) + GetKubeConfigWithExpiry(context.Context, string, int64) (*KubernetesClusterConfig, *Response, error) GetCredentials(context.Context, string, *KubernetesClusterCredentialsGetRequest) (*KubernetesClusterCredentials, *Response, error) List(context.Context, *ListOptions) ([]*KubernetesCluster, *Response, error) Update(context.Context, string, *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error) @@ -45,6 +46,8 @@ type KubernetesService interface { DeleteNode(ctx context.Context, clusterID, poolID, nodeID string, req *KubernetesNodeDeleteRequest) (*Response, error) GetOptions(context.Context) (*KubernetesOptions, *Response, error) + AddRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) + RemoveRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) } var _ KubernetesService = &KubernetesServiceOp{} @@ -145,6 +148,11 @@ type KubernetesClusterCredentialsGetRequest struct { ExpirySeconds *int `json:"expiry_seconds,omitempty"` } +// KubernetesClusterRegistryRequest represents clusters to integrate with docr registry +type KubernetesClusterRegistryRequest struct { + ClusterUUIDs []string `json:"cluster_uuids,omitempty"` +} + // KubernetesCluster represents a Kubernetes cluster. type KubernetesCluster struct { ID string `json:"id,omitempty"` @@ -163,6 +171,7 @@ type KubernetesCluster struct { MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` AutoUpgrade bool `json:"auto_upgrade,omitempty"` SurgeUpgrade bool `json:"surge_upgrade,omitempty"` + RegistryEnabled bool `json:"registry_enabled,omitempty"` Status *KubernetesClusterStatus `json:"status,omitempty"` CreatedAt time.Time `json:"created_at,omitempty"` @@ -198,13 +207,36 @@ type KubernetesMaintenancePolicy struct { type KubernetesMaintenancePolicyDay int const ( + // KubernetesMaintenanceDayAny sets the KubernetesMaintenancePolicyDay to any + // day of the week KubernetesMaintenanceDayAny KubernetesMaintenancePolicyDay = iota + + // KubernetesMaintenanceDayMonday sets the KubernetesMaintenancePolicyDay to + // Monday KubernetesMaintenanceDayMonday + + // KubernetesMaintenanceDayTuesday sets the KubernetesMaintenancePolicyDay to + // Tuesday KubernetesMaintenanceDayTuesday + + // KubernetesMaintenanceDayWednesday sets the KubernetesMaintenancePolicyDay to + // Wednesday KubernetesMaintenanceDayWednesday + + // KubernetesMaintenanceDayThursday sets the KubernetesMaintenancePolicyDay to + // Thursday KubernetesMaintenanceDayThursday + + // KubernetesMaintenanceDayFriday sets the KubernetesMaintenancePolicyDay to + // Friday KubernetesMaintenanceDayFriday + + // KubernetesMaintenanceDaySaturday sets the KubernetesMaintenancePolicyDay to + // Saturday KubernetesMaintenanceDaySaturday + + // KubernetesMaintenanceDaySunday sets the KubernetesMaintenancePolicyDay to + // Sunday KubernetesMaintenanceDaySunday ) @@ -250,6 +282,7 @@ func (k KubernetesMaintenancePolicyDay) String() string { } +// UnmarshalJSON parses the JSON string into KubernetesMaintenancePolicyDay func (k *KubernetesMaintenancePolicyDay) UnmarshalJSON(data []byte) error { var val string if err := json.Unmarshal(data, &val); err != nil { @@ -264,6 +297,7 @@ func (k *KubernetesMaintenancePolicyDay) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON returns the JSON string for KubernetesMaintenancePolicyDay func (k 
KubernetesMaintenancePolicyDay) MarshalJSON() ([]byte, error) { if KubernetesMaintenanceDayAny <= k && k <= KubernetesMaintenanceDaySunday { return json.Marshal(days[k]) @@ -530,6 +564,27 @@ func (svc *KubernetesServiceOp) GetKubeConfig(ctx context.Context, clusterID str return res, resp, nil } +// GetKubeConfigWithExpiry returns a Kubernetes config file for the specified cluster with expiry_seconds. +func (svc *KubernetesServiceOp) GetKubeConfigWithExpiry(ctx context.Context, clusterID string, expirySeconds int64) (*KubernetesClusterConfig, *Response, error) { + path := fmt.Sprintf("%s/%s/kubeconfig", kubernetesClustersPath, clusterID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + q := req.URL.Query() + q.Add("expiry_seconds", fmt.Sprintf("%d", expirySeconds)) + req.URL.RawQuery = q.Encode() + configBytes := bytes.NewBuffer(nil) + resp, err := svc.client.Do(ctx, req, configBytes) + if err != nil { + return nil, resp, err + } + res := &KubernetesClusterConfig{ + KubeconfigYAML: configBytes.Bytes(), + } + return res, resp, nil +} + // GetCredentials returns a Kubernetes API server credentials for the specified cluster. func (svc *KubernetesServiceOp) GetCredentials(ctx context.Context, clusterID string, get *KubernetesClusterCredentialsGetRequest) (*KubernetesClusterCredentials, *Response, error) { path := fmt.Sprintf("%s/%s/credentials", kubernetesClustersPath, clusterID) @@ -716,3 +771,31 @@ func (svc *KubernetesServiceOp) GetOptions(ctx context.Context) (*KubernetesOpti } return root.Options, resp, nil } + +// AddRegistry integrates docr registry with all the specified clusters +func (svc *KubernetesServiceOp) AddRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) { + path := fmt.Sprintf("%s/registry", kubernetesBasePath) + request, err := svc.client.NewRequest(ctx, http.MethodPost, path, req) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, request, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// RemoveRegistry removes docr registry support for all the specified clusters +func (svc *KubernetesServiceOp) RemoveRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) { + path := fmt.Sprintf("%s/registry", kubernetesBasePath) + request, err := svc.client.NewRequest(ctx, http.MethodDelete, path, req) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, request, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go index 48a20879a5623..b7debfe15a082 100644 --- a/vendor/github.com/digitalocean/godo/load_balancers.go +++ b/vendor/github.com/digitalocean/godo/load_balancers.go @@ -53,6 +53,7 @@ func (l LoadBalancer) String() string { return Stringify(l) } +// URN returns the load balancer ID in a valid DO API URN form. func (l LoadBalancer) URN() string { return ToURN("LoadBalancer", l.ID) } diff --git a/vendor/github.com/digitalocean/godo/projects.go b/vendor/github.com/digitalocean/godo/projects.go index 172c2c9e2e3fb..c31573b2936ce 100644 --- a/vendor/github.com/digitalocean/godo/projects.go +++ b/vendor/github.com/digitalocean/godo/projects.go @@ -117,7 +117,7 @@ type ProjectResource struct { Status string `json:"status,omitempty"` } -// ProjetResourceLinks specify the link for more information about the resource. 
+// ProjectResourceLinks specify the link for more information about the resource. type ProjectResourceLinks struct { Self string `json:"self"` } @@ -252,7 +252,6 @@ func (p *ProjectsServiceOp) ListResources(ctx context.Context, projectID string, // AssignResources assigns one or more resources to a project. AssignResources // accepts resources in two possible formats: - // 1. The resource type, like `&Droplet{ID: 1}` or `&FloatingIP{IP: "1.2.3.4"}` // 2. A valid DO URN as a string, like "do:droplet:1234" // diff --git a/vendor/github.com/digitalocean/godo/registry.go b/vendor/github.com/digitalocean/godo/registry.go index 1b5c40bd3d8cf..51c9eb70573ee 100644 --- a/vendor/github.com/digitalocean/godo/registry.go +++ b/vendor/github.com/digitalocean/godo/registry.go @@ -28,6 +28,13 @@ type RegistryService interface { ListRepositoryTags(context.Context, string, string, *ListOptions) ([]*RepositoryTag, *Response, error) DeleteTag(context.Context, string, string, string) (*Response, error) DeleteManifest(context.Context, string, string, string) (*Response, error) + StartGarbageCollection(context.Context, string) (*GarbageCollection, *Response, error) + GetGarbageCollection(context.Context, string) (*GarbageCollection, *Response, error) + ListGarbageCollections(context.Context, string, *ListOptions) ([]*GarbageCollection, *Response, error) + UpdateGarbageCollection(context.Context, string, string, *UpdateGarbageCollectionRequest) (*GarbageCollection, *Response, error) + GetOptions(context.Context) (*RegistryOptions, *Response, error) + GetSubscription(context.Context) (*RegistrySubscription, *Response, error) + UpdateSubscription(context.Context, *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error) } var _ RegistryService = &RegistryServiceOp{} @@ -39,7 +46,8 @@ type RegistryServiceOp struct { // RegistryCreateRequest represents a request to create a registry. type RegistryCreateRequest struct { - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty"` + SubscriptionTierSlug string `json:"subscription_tier_slug,omitempty"` } // RegistryDockerCredentialsRequest represents a request to retrieve docker @@ -90,6 +98,74 @@ type repositoryTagsRoot struct { Meta *Meta `json:"meta"` } +// GarbageCollection represents a garbage collection. +type GarbageCollection struct { + UUID string `json:"uuid"` + RegistryName string `json:"registry_name"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + BlobsDeleted uint64 `json:"blobs_deleted"` + FreedBytes uint64 `json:"freed_bytes"` +} + +type garbageCollectionRoot struct { + GarbageCollection *GarbageCollection `json:"garbage_collection,omitempty"` +} + +type garbageCollectionsRoot struct { + GarbageCollections []*GarbageCollection `json:"garbage_collections,omitempty"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta"` +} + +// UpdateGarbageCollectionRequest represents a request to update a garbage +// collection. +type UpdateGarbageCollectionRequest struct { + Cancel bool `json:"cancel"` +} + +// RegistryOptions are options for users when creating or updating a registry. +type RegistryOptions struct { + SubscriptionTiers []*RegistrySubscriptionTier `json:"subscription_tiers,omitempty"` +} + +type registryOptionsRoot struct { + Options *RegistryOptions `json:"options"` +} + +// RegistrySubscriptionTier is a subscription tier for container registry. 
+type RegistrySubscriptionTier struct { + Name string `json:"name"` + Slug string `json:"slug"` + IncludedRepositories uint64 `json:"included_repositories"` + IncludedStorageBytes uint64 `json:"included_storage_bytes"` + AllowStorageOverage bool `json:"allow_storage_overage"` + IncludedBandwidthBytes uint64 `json:"included_bandwidth_bytes"` + MonthlyPriceInCents uint64 `json:"monthly_price_in_cents"` + Eligible bool `json:"eligible,omitempty"` + // EligibilityReaons is included when Eligible is false, and indicates the + // reasons why this tier is not availble to the user. + EligibilityReasons []string `json:"eligibility_reasons,omitempty"` +} + +// RegistrySubscription is a user's subscription. +type RegistrySubscription struct { + Tier *RegistrySubscriptionTier `json:"tier"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type registrySubscriptionRoot struct { + Subscription *RegistrySubscription `json:"subscription"` +} + +// RegistrySubscriptionUpdateRequest represents a request to update the +// subscription plan for a registry. +type RegistrySubscriptionUpdateRequest struct { + TierSlug string `json:"tier_slug"` +} + // Get retrieves the details of a Registry. func (svc *RegistryServiceOp) Get(ctx context.Context) (*Registry, *Response, error) { req, err := svc.client.NewRequest(ctx, http.MethodGet, registryPath, nil) @@ -251,3 +327,140 @@ func (svc *RegistryServiceOp) DeleteManifest(ctx context.Context, registry, repo return resp, nil } + +// StartGarbageCollection requests a garbage collection for the specified +// registry. +func (svc *RegistryServiceOp) StartGarbageCollection(ctx context.Context, registry string) (*GarbageCollection, *Response, error) { + path := fmt.Sprintf("%s/%s/garbage-collection", registryPath, registry) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(garbageCollectionRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.GarbageCollection, resp, err +} + +// GetGarbageCollection retrieves the currently-active garbage collection for +// the specified registry; if there are no active garbage collections, then +// return a 404/NotFound error. There can only be one active garbage +// collection on a registry. +func (svc *RegistryServiceOp) GetGarbageCollection(ctx context.Context, registry string) (*GarbageCollection, *Response, error) { + path := fmt.Sprintf("%s/%s/garbage-collection", registryPath, registry) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(garbageCollectionRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.GarbageCollection, resp, nil +} + +// ListGarbageCollections retrieves all garbage collections (active and +// inactive) for the specified registry. 
+func (svc *RegistryServiceOp) ListGarbageCollections(ctx context.Context, registry string, opts *ListOptions) ([]*GarbageCollection, *Response, error) { + path := fmt.Sprintf("%s/%s/garbage-collections", registryPath, registry) + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(garbageCollectionsRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + if root.Links != nil { + resp.Links = root.Links + } + if root.Meta != nil { + resp.Meta = root.Meta + } + + return root.GarbageCollections, resp, nil +} + +// UpdateGarbageCollection updates the specified garbage collection for the +// specified registry. While only the currently-active garbage collection can +// be updated we still require the exact garbage collection to be specified to +// avoid race conditions that might may arise from issuing an update to the +// implicit "currently-active" garbage collection. Returns the updated garbage +// collection. +func (svc *RegistryServiceOp) UpdateGarbageCollection(ctx context.Context, registry, gcUUID string, request *UpdateGarbageCollectionRequest) (*GarbageCollection, *Response, error) { + path := fmt.Sprintf("%s/%s/garbage-collection/%s", registryPath, registry, gcUUID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, request) + if err != nil { + return nil, nil, err + } + + root := new(garbageCollectionRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.GarbageCollection, resp, nil +} + +// GetOptions returns options the user can use when creating or updating a +// registry. +func (svc *RegistryServiceOp) GetOptions(ctx context.Context) (*RegistryOptions, *Response, error) { + path := fmt.Sprintf("%s/options", registryPath) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(registryOptionsRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Options, resp, nil +} + +// GetSubscription retrieves the user's subscription. +func (svc *RegistryServiceOp) GetSubscription(ctx context.Context) (*RegistrySubscription, *Response, error) { + path := fmt.Sprintf("%s/subscription", registryPath) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(registrySubscriptionRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Subscription, resp, nil +} + +// UpdateSubscription updates the user's registry subscription. 
+func (svc *RegistryServiceOp) UpdateSubscription(ctx context.Context, request *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error) { + path := fmt.Sprintf("%s/subscription", registryPath) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, request) + if err != nil { + return nil, nil, err + } + root := new(registrySubscriptionRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Subscription, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/storage.go b/vendor/github.com/digitalocean/godo/storage.go index e1dda598a5197..43856e38e7288 100644 --- a/vendor/github.com/digitalocean/godo/storage.go +++ b/vendor/github.com/digitalocean/godo/storage.go @@ -60,6 +60,7 @@ func (f Volume) String() string { return Stringify(f) } +// URN returns the volume ID as a valid DO API URN func (f Volume) URN() string { return ToURN("Volume", f.ID) } diff --git a/vendor/github.com/digitalocean/godo/strings.go b/vendor/github.com/digitalocean/godo/strings.go index 4d5c0ad220795..f92893ed2ee83 100644 --- a/vendor/github.com/digitalocean/godo/strings.go +++ b/vendor/github.com/digitalocean/godo/strings.go @@ -10,6 +10,8 @@ import ( var timestampType = reflect.TypeOf(Timestamp{}) +// ResourceWithURN is an interface for interfacing with the types +// that implement the URN method. type ResourceWithURN interface { URN() string } diff --git a/vendor/github.com/digitalocean/godo/vpcs.go b/vendor/github.com/digitalocean/godo/vpcs.go index 7fbeaf88be97a..6c7b141c90436 100644 --- a/vendor/github.com/digitalocean/godo/vpcs.go +++ b/vendor/github.com/digitalocean/godo/vpcs.go @@ -39,6 +39,7 @@ type VPCCreateRequest struct { type VPCUpdateRequest struct { Name string `json:"name,omitempty"` Description string `json:"description,omitempty"` + Default *bool `json:"default,omitempty"` } // VPCSetField allows one to set individual fields within a VPC configuration. @@ -54,6 +55,16 @@ type VPCSetName string // Ex.: VPCs.Set(..., VPCSetDescription("vpc description")) type VPCSetDescription string +// VPCSetDefault is used when one wants to enable the `default` field of a VPC, to +// set a VPC as the default one in the region +// Ex.: VPCs.Set(..., VPCSetDefault()) +func VPCSetDefault() VPCSetField { + return &vpcSetDefault{} +} + +// vpcSetDefault satisfies the VPCSetField interface +type vpcSetDefault struct{} + // VPC represents a DigitalOcean Virtual Private Cloud configuration. type VPC struct { ID string `json:"id,omitempty"` @@ -161,6 +172,10 @@ func (n VPCSetDescription) vpcSetField(in map[string]interface{}) { in["description"] = n } +func (*vpcSetDefault) vpcSetField(in map[string]interface{}) { + in["default"] = true +} + // Set updates specific properties of a Virtual Private Cloud. 
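Editor's note: a hedged sketch (not part of the patch) of how the new VPCSetDefault field setter plugs into the Set call whose body follows below. The token and VPC ID are placeholders; combining it with VPCSetName in one call is assumed to work like the other field setters.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	client := godo.NewFromToken("dop_v1_example_token") // placeholder token
	ctx := context.Background()

	// Mark an existing VPC (ID is a placeholder) as the default for its region,
	// optionally renaming it in the same request.
	vpc, _, err := client.VPCs.Set(ctx, "vpc-uuid-placeholder",
		godo.VPCSetName("primary-vpc"),
		godo.VPCSetDefault(),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default VPC:", vpc.Name)
}
```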
func (v *VPCsServiceOp) Set(ctx context.Context, id string, fields ...VPCSetField) (*VPC, *Response, error) { path := vpcsBasePath + "/" + id diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index 03ffa31a4eefa..7947f283af12d 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -25,6 +25,15 @@ linters: - whitespace - wsl - funlen + - wrapcheck + - testpackage + - nlreturn + - gofumpt + - goerr113 + - gci + - gomnd + - godot + - exhaustivestruct issues: exclude-rules: diff --git a/vendor/github.com/go-openapi/strfmt/.travis.yml b/vendor/github.com/go-openapi/strfmt/.travis.yml index eb962aebcda8b..eaee5b65a93e6 100644 --- a/vendor/github.com/go-openapi/strfmt/.travis.yml +++ b/vendor/github.com/go-openapi/strfmt/.travis.yml @@ -1,13 +1,29 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.11.x -- 1.12.x +- 1.14.x +- 1.x +arch: +- amd64 +jobs: + include: + # only run fast tests on ppc64le + - go: 1.x + arch: ppc64le + script: + - gotestsum -f short-verbose -- ./... + + # include linting job, but only for latest go version and amd64 arch + - go: 1.x + arch: amd64 + install: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + script: + - golangci-lint run --new-from-rev master + install: - GO111MODULE=off go get -u gotest.tools/gotestsum language: go -env: -- GO111MODULE=on notifications: slack: secure: zE5AtIYTpYfQPnTzP+EaQPN7JKtfFAGv6PrJqoIZLOXa8B6zGb6+J1JRNNxWi7faWbyJOxa4FSSsuPsKZMycUK6wlLFIdhDxwqeo7Ew8r6rdZKdfUHQggfNS9wO79ARoNYUDHtmnaBUS+eWSM1YqSc4i99QxyyfuURLOeAaA/q14YbdlTlaw3lrZ0qT92ot1FnVGNOx064zuHtFeUf+jAVRMZ6Q3rvqllwIlPszE6rmHGXBt2VoJxRaBetdwd7FgkcYw9FPXKHhadwC7/75ZAdmxIukhxNMw4Tr5NuPcqNcnbYLenDP7B3lssGVIrP4BRSqekS1d/tqvdvnnFWHMwrNCkSnSc065G5+qWTlXKAemIclgiXXqE2furBNLm05MDdG8fn5epS0UNarkjD+zX336RiqwBlOX4KbF+vPyqcO98CsN0lnd+H6loc9reiTHs37orFFpQ+309av9be2GGsHUsRB9ssIyrewmhAccOmkRtr2dVTZJNFQwa5Kph5TNJuTjnZEwG/xUkEX2YSfwShOsb062JWiflV6PJdnl80pc9Tn7D5sO5Bf9DbijGRJwwP+YiiJtwtr+vsvS+n4sM0b5eqm4UoRo+JJO8ffoJtHS7ItuyRbVQCwEPJ4221WLcf5PquEEDdAPwR+K4Gj8qTXqTDdxOiES1xFUKVgmzhI= diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index ca1cdf862ec2f..3f93a72c86e1f 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -65,7 +65,7 @@ type NameNormalizer func(string) string // DefaultNameNormalizer removes all dashes func DefaultNameNormalizer(name string) string { - return strings.Replace(name, "-", "", -1) + return strings.ReplaceAll(name, "-", "") } type defaultFormats struct { diff --git a/vendor/github.com/go-openapi/strfmt/go.mod b/vendor/github.com/go-openapi/strfmt/go.mod index eaf75447125b7..6eaa6d7b94933 100644 --- a/vendor/github.com/go-openapi/strfmt/go.mod +++ b/vendor/github.com/go-openapi/strfmt/go.mod @@ -1,15 +1,12 @@ module github.com/go-openapi/strfmt require ( - github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a - github.com/go-openapi/errors v0.19.2 - github.com/go-stack/stack v1.8.0 // indirect - github.com/google/go-cmp v0.3.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef + github.com/go-openapi/errors v0.19.8 github.com/google/uuid v1.1.1 - github.com/mitchellh/mapstructure v1.1.2 - github.com/stretchr/testify v1.3.0 - github.com/tidwall/pretty v1.0.0 // indirect - go.mongodb.org/mongo-driver v1.0.3 + github.com/mitchellh/mapstructure v1.3.3 + 
github.com/stretchr/testify v1.6.1 + go.mongodb.org/mongo-driver v1.4.3 ) go 1.13 diff --git a/vendor/github.com/go-openapi/strfmt/go.sum b/vendor/github.com/go-openapi/strfmt/go.sum index e53dd37c10a99..a2d360321b65e 100644 --- a/vendor/github.com/go-openapi/strfmt/go.sum +++ b/vendor/github.com/go-openapi/strfmt/go.sum @@ -1,25 +1,130 @@ -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod 
h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/montanaflynn/stats 
v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-openapi/strfmt/time.go 
b/vendor/github.com/go-openapi/strfmt/time.go index 0b2f52d2490a2..f1b96267d9086 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -55,23 +55,36 @@ func IsDateTime(str string) bool { const ( // RFC3339Millis represents a ISO8601 format to millis instead of to nanos RFC3339Millis = "2006-01-02T15:04:05.000Z07:00" + // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos + RFC3339MillisNoColon = "2006-01-02T15:04:05.000Z0700" // RFC3339Micro represents a ISO8601 format to micro instead of to nano RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" + // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano + RFC3339MicroNoColon = "2006-01-02T15:04:05.000000Z0700" // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone) ISO8601LocalTime = "2006-01-02T15:04:05" // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs) ISO8601TimeWithReducedPrecision = "2006-01-02T15:04Z" - // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone) + // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone) ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" + // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. + ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) var ( - dateTimeFormats = []string{RFC3339Micro, RFC3339Millis, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime} - rxDateTime = regexp.MustCompile(DateTimePattern) + rxDateTime = regexp.MustCompile(DateTimePattern) + + // DateTimeFormats is the collection of formats used by ParseDateTime() + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern} + // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) MarshalFormat = RFC3339Millis + + // NormalizeTimeForMarshal provides a normalization function on time befeore marshalling (e.g. time.UTC). + // By default, the time value is not changed. 
+ NormalizeTimeForMarshal = func(t time.Time) time.Time { return t } ) // ParseDateTime parses a string that represents an ISO8601 time or a unix epoch @@ -80,7 +93,7 @@ func ParseDateTime(data string) (DateTime, error) { return NewDateTime(), nil } var lastError error - for _, layout := range dateTimeFormats { + for _, layout := range DateTimeFormats { dd, err := time.Parse(layout, data) if err != nil { lastError = err @@ -106,7 +119,7 @@ func NewDateTime() DateTime { // String converts this time to a string func (t DateTime) String() string { - return time.Time(t).Format(MarshalFormat) + return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat) } // MarshalText implements the text marshaller interface @@ -150,7 +163,7 @@ func (t DateTime) Value() (driver.Value, error) { // MarshalJSON returns the DateTime as JSON func (t DateTime) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(t).Format(MarshalFormat)) + return json.Marshal(NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)) } // UnmarshalJSON sets the DateTime from JSON @@ -199,7 +212,7 @@ func (t *DateTime) UnmarshalBSON(data []byte) error { func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { // UnixNano cannot be used, the result of calling UnixNano on the zero // Time is undefined. - i64 := time.Time(t).Unix() * 1000 + i64 := NormalizeTimeForMarshal(time.Time(t)).Unix() * 1000 buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(i64)) @@ -245,7 +258,7 @@ func (t *DateTime) GobDecode(data []byte) error { // MarshalBinary implements the encoding.BinaryMarshaler interface. func (t DateTime) MarshalBinary() ([]byte, error) { - return time.Time(t).MarshalBinary() + return NormalizeTimeForMarshal(time.Time(t)).MarshalBinary() } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go index 7633019f728c4..685c80a62bc91 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -166,20 +166,25 @@ func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL stri fd := fds.ByNumber(1) return w.marshalValue(fd, m.Get(fd), indent) case "Duration": + const maxSecondsInDuration = 315576000000 // "Generated output always contains 0, 3, 6, or 9 fractional digits, // depending on required precision." 
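Editor's note: a small sketch (assumption, not part of the patch) exercising the strfmt additions above — the colon-less and "universal sortable" layouts in DateTimeFormats, and the NormalizeTimeForMarshal hook, here set to force UTC on output.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Normalize all marshalled DateTimes to UTC; by default the hook is a no-op.
	strfmt.NormalizeTimeForMarshal = func(t time.Time) time.Time { return t.UTC() }

	// Both of these layouts are newly accepted by ParseDateTime.
	for _, in := range []string{
		"2021-01-07T08:43:56.000+0100", // RFC3339MillisNoColon
		"2021-01-07 08:43:56",          // ISO8601TimeUniversalSortableDateTimePattern
	} {
		dt, err := strfmt.ParseDateTime(in)
		if err != nil {
			log.Fatal(err)
		}
		// String() applies NormalizeTimeForMarshal and MarshalFormat (RFC3339Millis).
		fmt.Println(in, "->", dt.String())
	}
}
```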
s := m.Get(fds.ByNumber(1)).Int() ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } if ns <= -secondInNanos || ns >= secondInNanos { return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) } if (s > 0 && ns < 0) || (s < 0 && ns > 0) { return errors.New("signs of seconds and nanos do not match") } - if s < 0 { - ns = -ns + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns } - x := fmt.Sprintf("%d.%09d", s, ns) + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, ".000") diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go index 4a5931009876a..47eb3e44501d6 100644 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) { if i > utf8.MaxRune { return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) } - return string(i), s, nil + return string(rune(i)), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 3f7d4c4949c0d..b38b101688642 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,4 +1,34 @@ -## 0.14.0 (Unreleased) +## 0.15.0 (Unreleased) + +## 0.14.0 (November 11, 2020) + +IMPROVEMENTS + +* Added `identity/v3/endpoints.Endpoint.Enabled` [GH-2030](https://github.com/gophercloud/gophercloud/pull/2030) +* Added `containerinfra/v1/clusters.Upgrade` [GH-2032](https://github.com/gophercloud/gophercloud/pull/2032) +* Added `compute/apiversions.List` [GH-2037](https://github.com/gophercloud/gophercloud/pull/2037) +* Added `compute/apiversions.Get` [GH-2037](https://github.com/gophercloud/gophercloud/pull/2037) +* Added `compute/v2/servers.ListOpts.IP` [GH-2038](https://github.com/gophercloud/gophercloud/pull/2038) +* Added `compute/v2/servers.ListOpts.IP6` [GH-2038](https://github.com/gophercloud/gophercloud/pull/2038) +* Added `compute/v2/servers.ListOpts.UserID` [GH-2038](https://github.com/gophercloud/gophercloud/pull/2038) +* Added `dns/v2/transfer/accept.List` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `dns/v2/transfer/accept.Get` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `dns/v2/transfer/accept.Create` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `dns/v2/transfer/requests.List` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `dns/v2/transfer/requests.Get` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `dns/v2/transfer/requests.Update` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `dns/v2/transfer/requests.Delete` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) +* Added `baremetal/v1/nodes.RescueWait` [GH-2052](https://github.com/gophercloud/gophercloud/pull/2052) +* Added `baremetal/v1/nodes.Unrescuing` [GH-2052](https://github.com/gophercloud/gophercloud/pull/2052) +* Added `networking/v2/extensions/fwaas_v2/groups.List` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) +* Added 
`networking/v2/extensions/fwaas_v2/groups.Get` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) +* Added `networking/v2/extensions/fwaas_v2/groups.Create` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) +* Added `networking/v2/extensions/fwaas_v2/groups.Update` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) +* Added `networking/v2/extensions/fwaas_v2/groups.Delete` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) + +BUG FIXES + +* Changed `networking/v2/extensions/layer3/routers.Routes` from `[]Route` to `*[]Route` [GH-2043](https://github.com/gophercloud/gophercloud/pull/2043) ## 0.13.0 (September 27, 2020) diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index ad29041d9bfee..95539563acefa 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -60,6 +60,13 @@ prompted for your password. ### Authentication +> NOTE: It is now recommended to use the `clientconfig` package found at +> https://github.com/gophercloud/utils/tree/master/openstack/clientconfig +> for all authentication purposes. +> +> The below documentation is still relevant. clientconfig simply implements +> the below and presents it in an easier and more flexible way. + Once you have access to your credentials, you can begin plugging them into Gophercloud. The next step is authentication, and this is handled by a base "Provider" struct. To get one, you can either pass in your credentials diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go index 4c594e2ef2e63..4e6042409e976 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -29,6 +29,14 @@ type ListOpts struct { // Flavor is the name of the flavor in URL format. Flavor string `q:"flavor"` + // IP is a regular expression to match the IPv4 address of the server. + IP string `q:"ip"` + + // This requires the client to be set to microversion 2.5 or later, unless + // the user is an admin. + // IP is a regular expression to match the IPv6 address of the server. + IP6 string `q:"ip6"` + // Name of the server as a string; can be queried with regular expressions. // Realize that ?name=bob returns both bob and bobb. If you need to match bob // only, you can use a regular expression matching the syntax of the @@ -55,6 +63,11 @@ type ListOpts struct { // Setting "AllTenants = true" is required. TenantID string `q:"tenant_id"` + // This requires the client to be set to microversion 2.83 or later, unless + // the user is an admin. + // UserID lists servers for a particular user. + UserID string `q:"user_id"` + // This requires the client to be set to microversion 2.26 or later. // Tags filters on specific server tags. All tags must be present for the server. 
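Editor's note: a hedged sketch (not part of the patch) of the new IP and UserID filters on compute/v2/servers.ListOpts. Environment-based auth, the region, the microversion value, the IP regular expression, and the user ID are all placeholder assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
)

func main() {
	// Credentials come from OS_* environment variables; this is an assumption
	// about how the client is built, not something the patch prescribes.
	opts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		log.Fatal(err)
	}
	compute, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{Region: "RegionOne"})
	if err != nil {
		log.Fatal(err)
	}

	// IP is a regular expression matched against the IPv4 address; UserID needs
	// microversion 2.83+ unless the caller is an admin.
	compute.Microversion = "2.83"
	pages, err := servers.List(compute, servers.ListOpts{
		IP:     `10\.0\.0\..*`,
		UserID: "example-user-id",
	}).AllPages()
	if err != nil {
		log.Fatal(err)
	}
	all, err := servers.ExtractServers(pages)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range all {
		fmt.Println(s.ID, s.Name)
	}
}
```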
Tags string `q:"tags"` diff --git a/vendor/github.com/lann/builder/.travis.yml b/vendor/github.com/lann/builder/.travis.yml index 8687342e9d402..c8860f69bc722 100644 --- a/vendor/github.com/lann/builder/.travis.yml +++ b/vendor/github.com/lann/builder/.travis.yml @@ -1,6 +1,7 @@ language: go go: - - 1.1 - - 1.2 + - '1.8' + - '1.9' + - '1.10' - tip diff --git a/vendor/github.com/lann/builder/LICENSE b/vendor/github.com/lann/builder/LICENSE new file mode 100644 index 0000000000000..a109e8051c16c --- /dev/null +++ b/vendor/github.com/lann/builder/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2015 Lann Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lann/builder/registry.go b/vendor/github.com/lann/builder/registry.go index 182e69f407f72..612845418e297 100644 --- a/vendor/github.com/lann/builder/registry.go +++ b/vendor/github.com/lann/builder/registry.go @@ -1,8 +1,14 @@ package builder -import "reflect" +import ( + "reflect" + "sync" +) -var registry = make(map[reflect.Type]reflect.Type) +var ( + registry = make(map[reflect.Type]reflect.Type) + registryMux sync.RWMutex +) // RegisterType maps the given builderType to a structType. // This mapping affects the type of slices returned by Get and is required for @@ -13,6 +19,8 @@ var registry = make(map[reflect.Type]reflect.Type) // RegisterType will panic if builderType's underlying type is not Builder or // if structType's Kind is not Struct. func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value { + registryMux.Lock() + defer registryMux.Unlock() structType.NumField() // Panic if structType is not a struct registry[builderType] = structType emptyValue := emptyBuilderValue.Convert(builderType) @@ -23,7 +31,7 @@ func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Va // // Returns an empty instance of the registered builder type which can be used // as the initial value for builder expressions. See example. 
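Editor's note: the lann/builder change above guards its type registry with a sync.RWMutex. A minimal stand-alone sketch of that read-mostly pattern (generic Go, not the library's actual code) follows; writes take the exclusive lock, lookups the shared lock, so concurrent registration and lookup no longer race on the map.

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	registry    = make(map[reflect.Type]reflect.Type)
	registryMux sync.RWMutex
)

// register maps a builder type to its struct type under the write lock.
func register(builderType, structType reflect.Type) {
	registryMux.Lock()
	defer registryMux.Unlock()
	registry[builderType] = structType
}

// lookup reads the mapping under the shared read lock.
func lookup(builderType reflect.Type) (reflect.Type, bool) {
	registryMux.RLock()
	defer registryMux.RUnlock()
	t, ok := registry[builderType]
	return t, ok
}

type fooBuilder struct{}
type foo struct{ Name string }

func main() {
	register(reflect.TypeOf(fooBuilder{}), reflect.TypeOf(foo{}))
	if t, ok := lookup(reflect.TypeOf(fooBuilder{})); ok {
		fmt.Println("registered struct type:", t)
	}
}
```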
-func Register(builderProto interface{}, structProto interface{}) interface{} { +func Register(builderProto, structProto interface{}) interface{} { empty := RegisterType( reflect.TypeOf(builderProto), reflect.TypeOf(structProto), @@ -32,6 +40,8 @@ func Register(builderProto interface{}, structProto interface{}) interface{} { } func getBuilderStructType(builderType reflect.Type) *reflect.Type { + registryMux.RLock() + defer registryMux.RUnlock() structType, ok := registry[builderType] if !ok { return nil diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml index 8eaa06429066d..7d9b172756642 100644 --- a/vendor/github.com/miekg/dns/.travis.yml +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -2,8 +2,8 @@ language: go sudo: false go: - - "1.12.x" - - "1.13.x" + - 1.14.x + - 1.15.x - tip env: diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 1e6b7c52dd60d..fc8394e2697a3 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -26,7 +26,6 @@ avoiding breaking changes wherever reasonable. We support the last two versions A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/coredns/coredns -* https://cloudflare.com * https://github.com/abh/geodns * https://github.com/baidu/bfe * http://www.statdns.com/ @@ -42,11 +41,9 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/StalkR/dns-reverse-proxy * https://github.com/tianon/rawdns * https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ * https://github.com/fcambus/statzone * https://github.com/benschw/dns-clb-go * https://github.com/corny/dnscheck for -* https://namesmith.io * https://github.com/miekg/unbound * https://github.com/miekg/exdns * https://dnslookup.org @@ -55,24 +52,23 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/mehrdadrad/mylg * https://github.com/bamarni/dockness * https://github.com/fffaraz/microdns -* http://kelda.io * https://github.com/ipdcode/hades * https://github.com/StackExchange/dnscontrol/ * https://www.dnsperf.com/ * https://dnssectest.net/ -* https://dns.apebits.com * https://github.com/oif/apex * https://github.com/jedisct1/dnscrypt-proxy * https://github.com/jedisct1/rpdns * https://github.com/xor-gate/sshfp * https://github.com/rs/dnstrace * https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) -* https://github.com/semihalev/sdns * https://render.com * https://github.com/peterzen/goresolver * https://github.com/folbricht/routedns * https://domainr.com/ * https://zonedb.org/ +* https://router7.org/ +* https://github.com/fortio/dnsping Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index bb8667fd68bed..e7ff786a237fc 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -34,7 +34,7 @@ type Client struct { Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and - // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) + // Client.Dialer) or context.Context.Deadline (see ExchangeContext) Timeout time.Duration DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero @@ -106,7 +106,7 @@ func (c *Client) Dial(address string) (conn *Conn, err error) { if err != nil { return nil, err } - + conn.UDPSize = c.UDPSize return conn, nil } @@ -185,9 +185,20 @@ func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err erro } co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId + if _, ok := co.Conn.(net.PacketConn); ok { + for { + r, err = co.ReadMsg() + // Ignore replies with mismatched IDs because they might be + // responses to earlier queries that timed out. + if err != nil || r.Id == m.Id { + break + } + } + } else { + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } } rtt = time.Since(t) return r, rtt, err diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 68c0bd74d06a3..900f6e059d89f 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -3,10 +3,8 @@ package dns import ( "bytes" "crypto" - "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" - _ "crypto/md5" "crypto/rand" "crypto/rsa" _ "crypto/sha1" @@ -318,6 +316,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { } rr.Signature = toBase64(signature) + return nil case RSAMD5, DSA, DSANSEC3SHA1: // See RFC 6944. return ErrAlg @@ -332,9 +331,8 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { } rr.Signature = toBase64(signature) + return nil } - - return nil } func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { @@ -346,7 +344,6 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, switch alg { case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: return signature, nil - case ECDSAP256SHA256, ECDSAP384SHA384: ecdsaSignature := &struct { R, S *big.Int @@ -366,20 +363,11 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, signature := intToBytes(ecdsaSignature.R, intlen) signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) return signature, nil - - // There is no defined interface for what a DSA backed crypto.Signer returns - case DSA, DSANSEC3SHA1: - // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) - // signature := []byte{byte(t)} - // signature = append(signature, intToBytes(r1, 20)...) - // signature = append(signature, intToBytes(s1, 20)...) - // rr.Signature = signature - case ED25519: return signature, nil + default: + return nil, ErrAlg } - - return nil, ErrAlg } // Verify validates an RRSet with the signature and key. This is only the @@ -448,7 +436,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { } switch rr.Algorithm { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? 
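Editor's note: the client change above makes UDP exchanges skip replies whose ID does not match the outstanding query instead of failing with ErrId. From the caller's side nothing changes; a plain exchange still looks roughly like this (the resolver address is a placeholder).

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)

	c := new(dns.Client) // UDP by default; mismatched IDs are now skipped, not fatal
	r, rtt, err := c.Exchange(m, "192.0.2.53:53") // placeholder resolver address
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rtt:", rtt, "answers:", len(r.Answer))
}
```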
pubkey := k.publicKeyRSA() // Get the key if pubkey == nil { @@ -600,30 +588,6 @@ func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { return pubkey } -func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) < 22 { - return nil - } - t, keybuf := int(keybuf[0]), keybuf[1:] - size := 64 + t*8 - q, keybuf := keybuf[:20], keybuf[20:] - if len(keybuf) != 3*size { - return nil - } - p, keybuf := keybuf[:size], keybuf[size:] - g, y := keybuf[:size], keybuf[size:] - pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = new(big.Int).SetBytes(q) - pubkey.Parameters.P = new(big.Int).SetBytes(p) - pubkey.Parameters.G = new(big.Int).SetBytes(g) - pubkey.Y = new(big.Int).SetBytes(y) - return pubkey -} - func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { keybuf, err := fromBase64([]byte(k.PublicKey)) if err != nil { diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go index 60737e5b2b348..2ab7b6d73b80c 100644 --- a/vendor/github.com/miekg/dns/dnssec_keygen.go +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -19,8 +19,6 @@ import ( // bits should be set to the size of the algorithm. func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { switch k.Algorithm { - case RSAMD5, DSA, DSANSEC3SHA1: - return nil, ErrAlg case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: if bits < 512 || bits > 4096 { return nil, ErrKeySize @@ -41,6 +39,8 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { if bits != 256 { return nil, ErrKeySize } + default: + return nil, ErrAlg } switch k.Algorithm { diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go index 0e6f3201656b3..6cbc28483f139 100644 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -43,15 +43,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er return nil, ErrPrivKey } switch uint8(algo) { - case RSAMD5, DSA, DSANSEC3SHA1: - return nil, ErrAlg - case RSASHA1: - fallthrough - case RSASHA1NSEC3SHA1: - fallthrough - case RSASHA256: - fallthrough - case RSASHA512: + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: priv, err := readPrivateKeyRSA(m) if err != nil { return nil, err @@ -62,11 +54,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er } priv.PublicKey = *pub return priv, nil - case ECCGOST: - return nil, ErrPrivKey - case ECDSAP256SHA256: - fallthrough - case ECDSAP384SHA384: + case ECDSAP256SHA256, ECDSAP384SHA384: priv, err := readPrivateKeyECDSA(m) if err != nil { return nil, err @@ -80,7 +68,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er case ED25519: return readPrivateKeyED25519(m) default: - return nil, ErrPrivKey + return nil, ErrAlg } } diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go index 4493c9d574550..072e445dadfa7 100644 --- a/vendor/github.com/miekg/dns/dnssec_privkey.go +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -2,7 +2,6 @@ package dns import ( "crypto" - "crypto/dsa" "crypto/ecdsa" "crypto/rsa" "math/big" @@ -17,8 +16,8 @@ var bigIntOne = big.NewInt(1) // PrivateKeyString converts a PrivateKey to a string. This string has the same // format as the private-key-file of BIND9 (Private-key-format: v1.3). 
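Editor's note: with DSA and RSAMD5 support removed, DNSKEY.Generate now falls through to a default branch that returns ErrAlg for any unsupported algorithm. A hedged sketch of the caller-visible behaviour, assuming a standard ECDSA P-256 key:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256, // zone signing key
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}

	// Supported algorithm: returns an *ecdsa.PrivateKey as crypto.PrivateKey.
	if _, err := key.Generate(256); err != nil {
		log.Fatal(err)
	}
	fmt.Println("generated ECDSA P-256 key, tag:", key.KeyTag())

	// Removed algorithms now hit the new default branch and return ErrAlg.
	key.Algorithm = dns.RSAMD5
	if _, err := key.Generate(512); err != nil {
		fmt.Println("RSAMD5 rejected:", err) // dns.ErrAlg
	}
}
```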
-// It needs some info from the key (the algorithm), so its a method of the DNSKEY -// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +// It needs some info from the key (the algorithm), so its a method of the DNSKEY. +// It supports *rsa.PrivateKey, *ecdsa.PrivateKey and ed25519.PrivateKey. func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { algorithm := strconv.Itoa(int(r.Algorithm)) algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" @@ -67,21 +66,6 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { "Algorithm: " + algorithm + "\n" + "PrivateKey: " + private + "\n" - case *dsa.PrivateKey: - T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) - prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) - subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) - base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) - priv := toBase64(intToBytes(p.X, 20)) - pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) - return format + - "Algorithm: " + algorithm + "\n" + - "Prime(p): " + prime + "\n" + - "Subprime(q): " + subprime + "\n" + - "Base(g): " + base + "\n" + - "Private_value(x): " + priv + "\n" + - "Public_value(y): " + pub + "\n" - case ed25519.PrivateKey: private := toBase64(p.Seed()) return format + diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go index 92421681f952f..6861de774b707 100644 --- a/vendor/github.com/miekg/dns/doc.go +++ b/vendor/github.com/miekg/dns/doc.go @@ -260,7 +260,7 @@ From RFC 2931: on requests and responses, and protection of the overall integrity of a response. It works like TSIG, except that SIG(0) uses public key cryptography, instead of -the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256, +the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512. Signing subsequent messages in multi-message sessions is not implemented. diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index cbcab57bcd7f8..47625ed0902d6 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "encoding/hex" "net" + "sort" "strings" ) @@ -612,6 +613,65 @@ func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { return off, nil } +func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) { + var xs []SVCBKeyValue + var code uint16 + var length uint16 + var err error + for off < len(msg) { + code, off, err = unpackUint16(msg, off) + if err != nil { + return nil, len(msg), &Error{err: "overflow unpacking SVCB"} + } + length, off, err = unpackUint16(msg, off) + if err != nil || off+int(length) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking SVCB"} + } + e := makeSVCBKeyValue(SVCBKey(code)) + if e == nil { + return nil, len(msg), &Error{err: "bad SVCB key"} + } + if err := e.unpack(msg[off : off+int(length)]); err != nil { + return nil, len(msg), err + } + if len(xs) > 0 && e.Key() <= xs[len(xs)-1].Key() { + return nil, len(msg), &Error{err: "SVCB keys not in strictly increasing order"} + } + xs = append(xs, e) + off += int(length) + } + return xs, off, nil +} + +func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) { + pairs = append([]SVCBKeyValue(nil), pairs...) 
+ sort.Slice(pairs, func(i, j int) bool { + return pairs[i].Key() < pairs[j].Key() + }) + prev := svcb_RESERVED + for _, el := range pairs { + if el.Key() == prev { + return len(msg), &Error{err: "repeated SVCB keys are not allowed"} + } + prev = el.Key() + packed, err := el.pack() + if err != nil { + return len(msg), err + } + off, err = packUint16(uint16(el.Key()), msg, off) + if err != nil { + return len(msg), &Error{err: "overflow packing SVCB"} + } + off, err = packUint16(uint16(len(packed)), msg, off) + if err != nil || off+len(packed) > len(msg) { + return len(msg), &Error{err: "overflow packing SVCB"} + } + copy(msg[off:off+len(packed)], packed) + off += len(packed) + } + return off, nil +} + func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { var ( servers []string @@ -683,6 +743,13 @@ func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) { if p.Negation { n = 0x80 } + + // trim trailing zero bytes as specified in RFC3123 Sections 4.1 and 4.2. + i := len(addr) - 1 + for ; i >= 0 && addr[i] == 0; i-- { + } + addr = addr[:i+1] + adflen := uint8(len(addr)) & 0x7f off, err = packUint8(n|adflen, msg, off) if err != nil { diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go index a76150a861b1f..156c5a0e876a6 100644 --- a/vendor/github.com/miekg/dns/msg_truncate.go +++ b/vendor/github.com/miekg/dns/msg_truncate.go @@ -9,7 +9,8 @@ package dns // requested buffer size. // // The TC bit will be set if any records were excluded from the message. -// This indicates to that the client should retry over TCP. +// If the TC bit is already set on the message it will be retained. +// TC indicates that the client should retry over TCP. // // According to RFC 2181, the TC bit should only be set if not all of the // "required" RRs can be included in the response. Unfortunately, we have @@ -28,11 +29,11 @@ func (dns *Msg) Truncate(size int) { } // RFC 6891 mandates that the payload size in an OPT record - // less than 512 bytes must be treated as equal to 512 bytes. + // less than 512 (MinMsgSize) bytes must be treated as equal to 512 bytes. // // For ease of use, we impose that restriction here. - if size < 512 { - size = 512 + if size < MinMsgSize { + size = MinMsgSize } l := msgLenWithCompressionMap(dns, nil) // uncompressed length @@ -77,7 +78,7 @@ func (dns *Msg) Truncate(size int) { } // See the function documentation for when we set this. - dns.Truncated = len(dns.Answer) > numAnswer || + dns.Truncated = dns.Truncated || len(dns.Answer) > numAnswer || len(dns.Ns) > numNS || len(dns.Extra) > numExtra dns.Answer = dns.Answer[:numAnswer] diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index e18566fc8752a..aa2840efba890 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -1210,11 +1210,29 @@ func stringToCm(token string) (e, m uint8, ok bool) { if cmeters, err = strconv.Atoi(s[1]); err != nil { return } + // There's no point in having more than 2 digits in this part, and would rather make the implementation complicated ('123' should be treated as '12'). + // So we simply reject it. + // We also make sure the first character is a digit to reject '+-' signs. + if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' { + return + } + if len(s[1]) == 1 { + // 'nn.1' must be treated as 'nn-meters and 10cm, not 1cm. + cmeters *= 10 + } + if len(s[0]) == 0 { + // This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). 
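Editor's note: the Truncate changes above clamp the target size to MinMsgSize and preserve a TC bit that is already set on the message. A hedged handler-side sketch of sizing a reply for the client's advertised UDP buffer:

```go
package main

import (
	"github.com/miekg/dns"
)

// truncateForUDP sizes a reply for the client's advertised UDP buffer before it
// is written out. Sizes below dns.MinMsgSize (512) are clamped by Truncate, and
// a TC bit already set on the reply is now retained.
func truncateForUDP(req, reply *dns.Msg) {
	size := dns.MinMsgSize
	if opt := req.IsEdns0(); opt != nil {
		size = int(opt.UDPSize())
	}
	reply.Truncate(size)
}

func main() {
	req := new(dns.Msg)
	req.SetQuestion("example.org.", dns.TypeTXT)
	req.SetEdns0(1232, false) // client advertises a 1232-byte UDP buffer

	reply := new(dns.Msg)
	reply.SetReply(req)
	truncateForUDP(req, reply)
}
```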
+ break + } fallthrough case 1: if meters, err = strconv.Atoi(s[0]); err != nil { return } + // RFC1876 states the max value is 90000000.00. The latter two conditions enforce it. + if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) { + return + } case 0: // huh? return 0, 0, false @@ -1227,13 +1245,10 @@ func stringToCm(token string) (e, m uint8, ok bool) { e = 0 val = cmeters } - for val > 10 { + for val >= 10 { e++ val /= 10 } - if e > 9 { - ok = false - } m = uint8(val) return } @@ -1275,6 +1290,9 @@ func appendOrigin(name, origin string) string { // LOC record helper function func locCheckNorth(token string, latitude uint32) (uint32, bool) { + if latitude > 90 * 1000 * 60 * 60 { + return latitude, false + } switch token { case "n", "N": return LOC_EQUATOR + latitude, true @@ -1286,6 +1304,9 @@ func locCheckNorth(token string, latitude uint32) (uint32, bool) { // LOC record helper function func locCheckEast(token string, longitude uint32) (uint32, bool) { + if longitude > 180 * 1000 * 60 * 60 { + return longitude, false + } switch token { case "e", "E": return LOC_EQUATOR + longitude, true diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 11b08ad1d12e5..69f10052f4ebe 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -590,7 +590,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { // North l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { + if e != nil || l.err || i > 90 { return &ParseError{"", "bad LOC Latitude", l} } rr.Latitude = 1000 * 60 * 60 * uint32(i) @@ -601,7 +601,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { goto East } - if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err { + if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 { return &ParseError{"", "bad LOC Latitude minutes", l} } else { rr.Latitude += 1000 * 60 * uint32(i) @@ -609,7 +609,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() - if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err { + if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err || i < 0 || i >= 60 { return &ParseError{"", "bad LOC Latitude seconds", l} } else { rr.Latitude += uint32(1000 * i) @@ -627,7 +627,7 @@ East: // East c.Next() // zBlank l, _ = c.Next() - if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err { + if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 180 { return &ParseError{"", "bad LOC Longitude", l} } else { rr.Longitude = 1000 * 60 * 60 * uint32(i) @@ -638,14 +638,14 @@ East: if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { goto Altitude } - if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err { + if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 { return &ParseError{"", "bad LOC Longitude minutes", l} } else { rr.Longitude += 1000 * 60 * uint32(i) } c.Next() // zBlank l, _ = c.Next() - if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err { + if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err || i < 0 || i >= 60 { return &ParseError{"", "bad LOC Longitude seconds", l} } else { rr.Longitude += uint32(1000 * i) @@ -668,7 +668,7 @@ Altitude: if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { l.token = l.token[0 : 
len(l.token)-1] } - if i, err := strconv.ParseFloat(l.token, 32); err != nil { + if i, err := strconv.ParseFloat(l.token, 64); err != nil { return &ParseError{"", "bad LOC Altitude", l} } else { rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) @@ -893,8 +893,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() if i, err := StringToTime(l.token); err != nil { // Try to see if all numeric and use it as epoch - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - // TODO(miek): error out on > MAX_UINT32, same below + if i, err := strconv.ParseUint(l.token, 10, 32); err == nil { rr.Expiration = uint32(i) } else { return &ParseError{"", "bad RRSIG Expiration", l} @@ -906,7 +905,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() if i, err := StringToTime(l.token); err != nil { - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + if i, err := strconv.ParseUint(l.token, 10, 32); err == nil { rr.Inception = uint32(i) } else { return &ParseError{"", "bad RRSIG Inception", l} diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go index aadb0bf072a26..e7f36e221824f 100644 --- a/vendor/github.com/miekg/dns/serve_mux.go +++ b/vendor/github.com/miekg/dns/serve_mux.go @@ -91,7 +91,7 @@ func (mux *ServeMux) HandleRemove(pattern string) { // are redirected to the parent zone (if that is also registered), // otherwise the child gets the query. // -// If no handler is found, or there is no question, a standard SERVFAIL +// If no handler is found, or there is no question, a standard REFUSED // message is returned func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) { var h Handler @@ -102,7 +102,7 @@ func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) { if h != nil { h.ServeDNS(w, req) } else { - HandleFailed(w, req) + handleRefused(w, req) } } diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 3cf1a0240112b..30dfd41def49c 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -72,13 +72,22 @@ type response struct { tsigStatus error tsigRequestMAC string tsigSecret map[string]string // the tsig secrets - udp *net.UDPConn // i/o connection if UDP was used + udp net.PacketConn // i/o connection if UDP was used tcp net.Conn // i/o connection if TCP was used udpSession *SessionUDP // oob data to get egress interface right + pcSession net.Addr // address to use when writing to a generic net.PacketConn writer Writer // writer to output the raw DNS bits } +// handleRefused returns a HandlerFunc that returns REFUSED for every request it gets. +func handleRefused(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeRefused) + w.WriteMsg(m) +} + // HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. +// Deprecated: This function is going away. func HandleFailed(w ResponseWriter, r *Msg) { m := new(Msg) m.SetRcode(r, RcodeServerFailure) @@ -139,12 +148,24 @@ type Reader interface { ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) } -// defaultReader is an adapter for the Server struct that implements the Reader interface -// using the readTCP and readUDP func of the embedded Server. +// PacketConnReader is an optional interface that Readers can implement to support using generic net.PacketConns. 
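Editor's note: with the ServeMux change above, queries that match no registered handler now get REFUSED rather than the old SERVFAIL, and HandleFailed is deprecated. A hedged sketch of registering a zone handler on the default mux (port is a placeholder):

```go
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Queries for example.org. are answered here; anything else now receives
	// REFUSED from the default ServeMux instead of SERVFAIL.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		m.Authoritative = true
		w.WriteMsg(m)
	})

	srv := &dns.Server{Addr: ":8053", Net: "udp"} // placeholder port
	log.Fatal(srv.ListenAndServe())
}
```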
+type PacketConnReader interface { + Reader + + // ReadPacketConn reads a raw message from a generic net.PacketConn UDP connection. Implementations may + // alter connection properties, for example the read-deadline. + ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader and +// PacketConnReader interfaces using the readTCP, readUDP and readPacketConn funcs +// of the embedded Server. type defaultReader struct { *Server } +var _ PacketConnReader = defaultReader{} + func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { return dr.readTCP(conn, timeout) } @@ -153,8 +174,14 @@ func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byt return dr.readUDP(conn, timeout) } +func (dr defaultReader) ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) { + return dr.readPacketConn(conn, timeout) +} + // DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. // Implementations should never return a nil Reader. +// Readers should also implement the optional PacketConnReader interface. +// PacketConnReader is required to use a generic net.PacketConn. type DecorateReader func(Reader) Reader // DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. @@ -317,24 +344,22 @@ func (srv *Server) ActivateAndServe() error { srv.init() - pConn := srv.PacketConn - l := srv.Listener - if pConn != nil { + if srv.PacketConn != nil { // Check PacketConn interface's type is valid and value // is not nil - if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if t, ok := srv.PacketConn.(*net.UDPConn); ok && t != nil { if e := setUDPSocketOptions(t); e != nil { return e } - srv.started = true - unlock() - return srv.serveUDP(t) } + srv.started = true + unlock() + return srv.serveUDP(srv.PacketConn) } - if l != nil { + if srv.Listener != nil { srv.started = true unlock() - return srv.serveTCP(l) + return srv.serveTCP(srv.Listener) } return &Error{err: "bad listeners"} } @@ -438,18 +463,24 @@ func (srv *Server) serveTCP(l net.Listener) error { } // serveUDP starts a UDP listener for the server. 
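A rough sketch (again outside the patch) of what the server changes above enable: dns.Server can now be backed by any net.PacketConn, not only a *net.UDPConn. The port and the catch-all zone are placeholder values.

package main

import (
	"net"

	"github.com/miekg/dns"
)

func main() {
	// Any net.PacketConn works now; a plain ListenPacket socket is the simplest case.
	pc, err := net.ListenPacket("udp", "127.0.0.1:5301")
	if err != nil {
		panic(err)
	}

	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	srv := &dns.Server{PacketConn: pc} // nil Handler falls back to the default mux
	_ = srv.ActivateAndServe()
}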
-func (srv *Server) serveUDP(l *net.UDPConn) error { +func (srv *Server) serveUDP(l net.PacketConn) error { defer l.Close() - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - reader := Reader(defaultReader{srv}) if srv.DecorateReader != nil { reader = srv.DecorateReader(reader) } + lUDP, isUDP := l.(*net.UDPConn) + readerPC, canPacketConn := reader.(PacketConnReader) + if !isUDP && !canPacketConn { + return &Error{err: "PacketConnReader was not implemented on Reader returned from DecorateReader but is required for net.PacketConn"} + } + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + var wg sync.WaitGroup defer func() { wg.Wait() @@ -459,7 +490,17 @@ func (srv *Server) serveUDP(l *net.UDPConn) error { rtimeout := srv.getReadTimeout() // deadline is not used here for srv.isStarted() { - m, s, err := reader.ReadUDP(l, rtimeout) + var ( + m []byte + sPC net.Addr + sUDP *SessionUDP + err error + ) + if isUDP { + m, sUDP, err = reader.ReadUDP(lUDP, rtimeout) + } else { + m, sPC, err = readerPC.ReadPacketConn(l, rtimeout) + } if err != nil { if !srv.isStarted() { return nil @@ -476,7 +517,7 @@ func (srv *Server) serveUDP(l *net.UDPConn) error { continue } wg.Add(1) - go srv.serveUDPPacket(&wg, m, l, s) + go srv.serveUDPPacket(&wg, m, l, sUDP, sPC) } return nil @@ -538,8 +579,8 @@ func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) { } // Serve a new UDP request. -func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) { - w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s} +func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn, udpSession *SessionUDP, pcSession net.Addr) { + w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: udpSession, pcSession: pcSession} if srv.DecorateWriter != nil { w.writer = srv.DecorateWriter(w) } else { @@ -651,6 +692,24 @@ func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *S return m, s, nil } +func (srv *Server) readPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) { + srv.lock.RLock() + if srv.started { + // See the comment in readTCP above. + conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + + m := srv.udpPool.Get().([]byte) + n, addr, err := conn.ReadFrom(m) + if err != nil { + srv.udpPool.Put(m) + return nil, nil, err + } + m = m[:n] + return m, addr, nil +} + // WriteMsg implements the ResponseWriter.WriteMsg method. 
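The check above requires a decorated Reader to also satisfy PacketConnReader when the server runs on a generic net.PacketConn. A hedged sketch of such a decorator; the loggingReader type is made up for illustration and assumes the wrapped (default) reader implements PacketConnReader.

package main

import (
	"errors"
	"log"
	"net"
	"time"

	"github.com/miekg/dns"
)

// loggingReader wraps the library's Reader and forwards ReadPacketConn so the
// PacketConnReader check in serveUDP still passes.
type loggingReader struct {
	dns.Reader
}

func (r loggingReader) ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
	pcr, ok := r.Reader.(dns.PacketConnReader)
	if !ok {
		return nil, nil, errors.New("wrapped Reader does not implement PacketConnReader")
	}
	m, addr, err := pcr.ReadPacketConn(conn, timeout)
	log.Printf("read %d bytes from %v", len(m), addr)
	return m, addr, err
}

func main() {
	pc, err := net.ListenPacket("udp", "127.0.0.1:5302")
	if err != nil {
		log.Fatal(err)
	}
	srv := &dns.Server{PacketConn: pc}
	srv.DecorateReader = func(inner dns.Reader) dns.Reader { return loggingReader{inner} }
	_ = srv.ActivateAndServe()
}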
func (w *response) WriteMsg(m *Msg) (err error) { if w.closed { @@ -684,7 +743,10 @@ func (w *response) Write(m []byte) (int, error) { switch { case w.udp != nil: - return WriteToSessionUDP(w.udp, m, w.udpSession) + if u, ok := w.udp.(*net.UDPConn); ok { + return WriteToSessionUDP(u, m, w.udpSession) + } + return w.udp.WriteTo(m, w.pcSession) case w.tcp != nil: if len(m) > MaxMsgSize { return 0, &Error{err: "message too large"} @@ -717,10 +779,12 @@ func (w *response) RemoteAddr() net.Addr { switch { case w.udpSession != nil: return w.udpSession.RemoteAddr() + case w.pcSession != nil: + return w.pcSession case w.tcp != nil: return w.tcp.RemoteAddr() default: - panic("dns: internal error: udpSession and tcp both nil") + panic("dns: internal error: udpSession, pcSession and tcp are all nil") } } diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go index 55cf1c3863ac2..9ef13ccf3926c 100644 --- a/vendor/github.com/miekg/dns/sig0.go +++ b/vendor/github.com/miekg/dns/sig0.go @@ -2,7 +2,6 @@ package dns import ( "crypto" - "crypto/dsa" "crypto/ecdsa" "crypto/rsa" "encoding/binary" @@ -85,7 +84,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { var hash crypto.Hash switch rr.Algorithm { - case DSA, RSASHA1: + case RSASHA1: hash = crypto.SHA1 case RSASHA256, ECDSAP256SHA256: hash = crypto.SHA256 @@ -178,17 +177,6 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { hashed := hasher.Sum(nil) sig := buf[sigend:] switch k.Algorithm { - case DSA: - pk := k.publicKeyDSA() - sig = sig[1:] - r := new(big.Int).SetBytes(sig[:len(sig)/2]) - s := new(big.Int).SetBytes(sig[len(sig)/2:]) - if pk != nil { - if dsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } case RSASHA1, RSASHA256, RSASHA512: pk := k.publicKeyRSA() if pk != nil { diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go new file mode 100644 index 0000000000000..f44dc67d7b509 --- /dev/null +++ b/vendor/github.com/miekg/dns/svcb.go @@ -0,0 +1,744 @@ +package dns + +import ( + "bytes" + "encoding/binary" + "errors" + "net" + "sort" + "strconv" + "strings" +) + +type SVCBKey uint16 + +// Keys defined in draft-ietf-dnsop-svcb-https-01 Section 12.3.2. +const ( + SVCB_MANDATORY SVCBKey = 0 + SVCB_ALPN SVCBKey = 1 + SVCB_NO_DEFAULT_ALPN SVCBKey = 2 + SVCB_PORT SVCBKey = 3 + SVCB_IPV4HINT SVCBKey = 4 + SVCB_ECHCONFIG SVCBKey = 5 + SVCB_IPV6HINT SVCBKey = 6 + svcb_RESERVED SVCBKey = 65535 +) + +var svcbKeyToStringMap = map[SVCBKey]string{ + SVCB_MANDATORY: "mandatory", + SVCB_ALPN: "alpn", + SVCB_NO_DEFAULT_ALPN: "no-default-alpn", + SVCB_PORT: "port", + SVCB_IPV4HINT: "ipv4hint", + SVCB_ECHCONFIG: "echconfig", + SVCB_IPV6HINT: "ipv6hint", +} + +var svcbStringToKeyMap = reverseSVCBKeyMap(svcbKeyToStringMap) + +func reverseSVCBKeyMap(m map[SVCBKey]string) map[string]SVCBKey { + n := make(map[string]SVCBKey, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +// String takes the numerical code of an SVCB key and returns its name. +// Returns an empty string for reserved keys. +// Accepts unassigned keys as well as experimental/private keys. +func (key SVCBKey) String() string { + if x := svcbKeyToStringMap[key]; x != "" { + return x + } + if key == svcb_RESERVED { + return "" + } + return "key" + strconv.FormatUint(uint64(key), 10) +} + +// svcbStringToKey returns the numerical code of an SVCB key. +// Returns svcb_RESERVED for reserved/invalid keys. +// Accepts unassigned keys as well as experimental/private keys. 
+func svcbStringToKey(s string) SVCBKey { + if strings.HasPrefix(s, "key") { + a, err := strconv.ParseUint(s[3:], 10, 16) + // no leading zeros + // key shouldn't be registered + if err != nil || a == 65535 || s[3] == '0' || svcbKeyToStringMap[SVCBKey(a)] != "" { + return svcb_RESERVED + } + return SVCBKey(a) + } + if key, ok := svcbStringToKeyMap[s]; ok { + return key + } + return svcb_RESERVED +} + +func (rr *SVCB) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return &ParseError{l.token, "bad SVCB priority", l} + } + rr.Priority = uint16(i) + + c.Next() // zBlank + l, _ = c.Next() // zString + rr.Target = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return &ParseError{l.token, "bad SVCB Target", l} + } + rr.Target = name + + // Values (if any) + l, _ = c.Next() + var xs []SVCBKeyValue + // Helps require whitespace between pairs. + // Prevents key1000="a"key1001=... + canHaveNextKey := true + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + if !canHaveNextKey { + // The key we can now read was probably meant to be + // a part of the last value. + return &ParseError{l.token, "bad SVCB value quotation", l} + } + + // In key=value pairs, value does not have to be quoted unless value + // contains whitespace. And keys don't need to have values. + // Similarly, keys with an equality signs after them don't need values. + // l.token includes at least up to the first equality sign. + idx := strings.IndexByte(l.token, '=') + var key, value string + if idx < 0 { + // Key with no value and no equality sign + key = l.token + } else if idx == 0 { + return &ParseError{l.token, "bad SVCB key", l} + } else { + key, value = l.token[:idx], l.token[idx+1:] + + if value == "" { + // We have a key and an equality sign. Maybe we have nothing + // after "=" or we have a double quote. + l, _ = c.Next() + if l.value == zQuote { + // Only needed when value ends with double quotes. + // Any value starting with zQuote ends with it. + canHaveNextKey = false + + l, _ = c.Next() + switch l.value { + case zString: + // We have a value in double quotes. + value = l.token + l, _ = c.Next() + if l.value != zQuote { + return &ParseError{l.token, "SVCB unterminated value", l} + } + case zQuote: + // There's nothing in double quotes. + default: + return &ParseError{l.token, "bad SVCB value", l} + } + } + } + } + kv := makeSVCBKeyValue(svcbStringToKey(key)) + if kv == nil { + return &ParseError{l.token, "bad SVCB key", l} + } + if err := kv.parse(value); err != nil { + return &ParseError{l.token, err.Error(), l} + } + xs = append(xs, kv) + case zQuote: + return &ParseError{l.token, "SVCB key can't contain double quotes", l} + case zBlank: + canHaveNextKey = true + default: + return &ParseError{l.token, "bad SVCB values", l} + } + l, _ = c.Next() + } + rr.Value = xs + if rr.Priority == 0 && len(xs) > 0 { + return &ParseError{l.token, "SVCB aliasform can't have values", l} + } + return nil +} + +// makeSVCBKeyValue returns an SVCBKeyValue struct with the key or nil for reserved keys. 
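A short usage sketch (not part of the patch) of the new SVCB/HTTPS types, following the "Basic use pattern" comments in this file; owner name, ALPN ids and port are placeholders.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Build an HTTPS record (an SVCB-compatible type) programmatically.
	h := new(dns.HTTPS)
	h.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET, Ttl: 300}
	h.Priority = 1
	h.Target = "."
	h.Value = append(h.Value,
		&dns.SVCBAlpn{Alpn: []string{"h2", "http/1.1"}},
		&dns.SVCBPort{Port: 8443},
	)
	fmt.Println(h)

	// The zone-file parser added above accepts the equivalent presentation format.
	rr, err := dns.NewRR(`example.org. 300 IN HTTPS 1 . alpn="h2,http/1.1" port=8443`)
	fmt.Println(rr, err)
}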
+func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { + switch key { + case SVCB_MANDATORY: + return new(SVCBMandatory) + case SVCB_ALPN: + return new(SVCBAlpn) + case SVCB_NO_DEFAULT_ALPN: + return new(SVCBNoDefaultAlpn) + case SVCB_PORT: + return new(SVCBPort) + case SVCB_IPV4HINT: + return new(SVCBIPv4Hint) + case SVCB_ECHCONFIG: + return new(SVCBECHConfig) + case SVCB_IPV6HINT: + return new(SVCBIPv6Hint) + case svcb_RESERVED: + return nil + default: + e := new(SVCBLocal) + e.KeyCode = key + return e + } +} + +// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-01). +type SVCB struct { + Hdr RR_Header + Priority uint16 + Target string `dns:"domain-name"` + Value []SVCBKeyValue `dns:"pairs"` // Value must be empty if Priority is non-zero. +} + +// HTTPS RR. Everything valid for SVCB applies to HTTPS as well. +// Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols. +type HTTPS struct { + SVCB +} + +func (rr *HTTPS) String() string { + return rr.SVCB.String() +} + +func (rr *HTTPS) parse(c *zlexer, o string) *ParseError { + return rr.SVCB.parse(c, o) +} + +// SVCBKeyValue defines a key=value pair for the SVCB RR type. +// An SVCB RR can have multiple SVCBKeyValues appended to it. +type SVCBKeyValue interface { + Key() SVCBKey // Key returns the numerical key code. + pack() ([]byte, error) // pack returns the encoded value. + unpack([]byte) error // unpack sets the value. + String() string // String returns the string representation of the value. + parse(string) error // parse sets the value to the given string representation of the value. + copy() SVCBKeyValue // copy returns a deep-copy of the pair. + len() int // len returns the length of value in the wire format. +} + +// SVCBMandatory pair adds to required keys that must be interpreted for the RR +// to be functional. +// Basic use pattern for creating a mandatory option: +// +// s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}} +// e := new(dns.SVCBMandatory) +// e.Code = []uint16{65403} +// s.Value = append(s.Value, e) +type SVCBMandatory struct { + Code []SVCBKey // Must not include mandatory +} + +func (*SVCBMandatory) Key() SVCBKey { return SVCB_MANDATORY } + +func (s *SVCBMandatory) String() string { + str := make([]string, len(s.Code)) + for i, e := range s.Code { + str[i] = e.String() + } + return strings.Join(str, ",") +} + +func (s *SVCBMandatory) pack() ([]byte, error) { + codes := append([]SVCBKey(nil), s.Code...) + sort.Slice(codes, func(i, j int) bool { + return codes[i] < codes[j] + }) + b := make([]byte, 2*len(codes)) + for i, e := range codes { + binary.BigEndian.PutUint16(b[2*i:], uint16(e)) + } + return b, nil +} + +func (s *SVCBMandatory) unpack(b []byte) error { + if len(b)%2 != 0 { + return errors.New("dns: svcbmandatory: value length is not a multiple of 2") + } + codes := make([]SVCBKey, 0, len(b)/2) + for i := 0; i < len(b); i += 2 { + // We assume strictly increasing order. 
+ codes = append(codes, SVCBKey(binary.BigEndian.Uint16(b[i:]))) + } + s.Code = codes + return nil +} + +func (s *SVCBMandatory) parse(b string) error { + str := strings.Split(b, ",") + codes := make([]SVCBKey, 0, len(str)) + for _, e := range str { + codes = append(codes, svcbStringToKey(e)) + } + s.Code = codes + return nil +} + +func (s *SVCBMandatory) len() int { + return 2 * len(s.Code) +} + +func (s *SVCBMandatory) copy() SVCBKeyValue { + return &SVCBMandatory{ + append([]SVCBKey(nil), s.Code...), + } +} + +// SVCBAlpn pair is used to list supported connection protocols. +// Protocol ids can be found at: +// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +// Basic use pattern for creating an alpn option: +// +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBAlpn) +// e.Alpn = []string{"h2", "http/1.1"} +// h.Value = append(o.Value, e) +type SVCBAlpn struct { + Alpn []string +} + +func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN } +func (s *SVCBAlpn) String() string { return strings.Join(s.Alpn, ",") } + +func (s *SVCBAlpn) pack() ([]byte, error) { + // Liberally estimate the size of an alpn as 10 octets + b := make([]byte, 0, 10*len(s.Alpn)) + for _, e := range s.Alpn { + if len(e) == 0 { + return nil, errors.New("dns: svcbalpn: empty alpn-id") + } + if len(e) > 255 { + return nil, errors.New("dns: svcbalpn: alpn-id too long") + } + b = append(b, byte(len(e))) + b = append(b, e...) + } + return b, nil +} + +func (s *SVCBAlpn) unpack(b []byte) error { + // Estimate the size of the smallest alpn as 4 bytes + alpn := make([]string, 0, len(b)/4) + for i := 0; i < len(b); { + length := int(b[i]) + i++ + if i+length > len(b) { + return errors.New("dns: svcbalpn: alpn array overflowing") + } + alpn = append(alpn, string(b[i:i+length])) + i += length + } + s.Alpn = alpn + return nil +} + +func (s *SVCBAlpn) parse(b string) error { + s.Alpn = strings.Split(b, ",") + return nil +} + +func (s *SVCBAlpn) len() int { + var l int + for _, e := range s.Alpn { + l += 1 + len(e) + } + return l +} + +func (s *SVCBAlpn) copy() SVCBKeyValue { + return &SVCBAlpn{ + append([]string(nil), s.Alpn...), + } +} + +// SVCBNoDefaultAlpn pair signifies no support for default connection protocols. +// Basic use pattern for creating a no-default-alpn option: +// +// s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}} +// e := new(dns.SVCBNoDefaultAlpn) +// s.Value = append(s.Value, e) +type SVCBNoDefaultAlpn struct{} + +func (*SVCBNoDefaultAlpn) Key() SVCBKey { return SVCB_NO_DEFAULT_ALPN } +func (*SVCBNoDefaultAlpn) copy() SVCBKeyValue { return &SVCBNoDefaultAlpn{} } +func (*SVCBNoDefaultAlpn) pack() ([]byte, error) { return []byte{}, nil } +func (*SVCBNoDefaultAlpn) String() string { return "" } +func (*SVCBNoDefaultAlpn) len() int { return 0 } + +func (*SVCBNoDefaultAlpn) unpack(b []byte) error { + if len(b) != 0 { + return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value") + } + return nil +} + +func (*SVCBNoDefaultAlpn) parse(b string) error { + if len(b) != 0 { + return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value") + } + return nil +} + +// SVCBPort pair defines the port for connection. 
+// Basic use pattern for creating a port option: +// +// s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}} +// e := new(dns.SVCBPort) +// e.Port = 80 +// s.Value = append(s.Value, e) +type SVCBPort struct { + Port uint16 +} + +func (*SVCBPort) Key() SVCBKey { return SVCB_PORT } +func (*SVCBPort) len() int { return 2 } +func (s *SVCBPort) String() string { return strconv.FormatUint(uint64(s.Port), 10) } +func (s *SVCBPort) copy() SVCBKeyValue { return &SVCBPort{s.Port} } + +func (s *SVCBPort) unpack(b []byte) error { + if len(b) != 2 { + return errors.New("dns: svcbport: port length is not exactly 2 octets") + } + s.Port = binary.BigEndian.Uint16(b) + return nil +} + +func (s *SVCBPort) pack() ([]byte, error) { + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, s.Port) + return b, nil +} + +func (s *SVCBPort) parse(b string) error { + port, err := strconv.ParseUint(b, 10, 16) + if err != nil { + return errors.New("dns: svcbport: port out of range") + } + s.Port = uint16(port) + return nil +} + +// SVCBIPv4Hint pair suggests an IPv4 address which may be used to open connections +// if A and AAAA record responses for SVCB's Target domain haven't been received. +// In that case, optionally, A and AAAA requests can be made, after which the connection +// to the hinted IP address may be terminated and a new connection may be opened. +// Basic use pattern for creating an ipv4hint option: +// +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBIPv4Hint) +// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} +// +// Or +// +// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} +// h.Value = append(h.Value, e) +type SVCBIPv4Hint struct { + Hint []net.IP +} + +func (*SVCBIPv4Hint) Key() SVCBKey { return SVCB_IPV4HINT } +func (s *SVCBIPv4Hint) len() int { return 4 * len(s.Hint) } + +func (s *SVCBIPv4Hint) pack() ([]byte, error) { + b := make([]byte, 0, 4*len(s.Hint)) + for _, e := range s.Hint { + x := e.To4() + if x == nil { + return nil, errors.New("dns: svcbipv4hint: expected ipv4, hint is ipv6") + } + b = append(b, x...) + } + return b, nil +} + +func (s *SVCBIPv4Hint) unpack(b []byte) error { + if len(b) == 0 || len(b)%4 != 0 { + return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4") + } + x := make([]net.IP, 0, len(b)/4) + for i := 0; i < len(b); i += 4 { + x = append(x, net.IP(b[i:i+4])) + } + s.Hint = x + return nil +} + +func (s *SVCBIPv4Hint) String() string { + str := make([]string, len(s.Hint)) + for i, e := range s.Hint { + x := e.To4() + if x == nil { + return "" + } + str[i] = x.String() + } + return strings.Join(str, ",") +} + +func (s *SVCBIPv4Hint) parse(b string) error { + if strings.Contains(b, ":") { + return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6") + } + str := strings.Split(b, ",") + dst := make([]net.IP, len(str)) + for i, e := range str { + ip := net.ParseIP(e).To4() + if ip == nil { + return errors.New("dns: svcbipv4hint: bad ip") + } + dst[i] = ip + } + s.Hint = dst + return nil +} + +func (s *SVCBIPv4Hint) copy() SVCBKeyValue { + return &SVCBIPv4Hint{ + append([]net.IP(nil), s.Hint...), + } +} + +// SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx]. 
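A follow-on sketch (still illustrative only) for the hint pairs: parse an SVCB record from presentation format and walk its typed key/value pairs. The owner name, target and addresses are placeholders.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("_dns.example.org. 300 IN SVCB 1 dot.example.org. ipv4hint=192.0.2.1 ipv6hint=2001:db8::1 port=853")
	if err != nil {
		panic(err)
	}
	svcb := rr.(*dns.SVCB)
	for _, kv := range svcb.Value {
		// kv.Key() yields the registered key name, kv.String() its value.
		fmt.Printf("%s=%s\n", kv.Key(), kv)
	}
}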
+// Basic use pattern for creating an echconfig option: +// +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBECHConfig) +// e.ECH = []byte{0xfe, 0x08, ...} +// h.Value = append(h.Value, e) +type SVCBECHConfig struct { + ECH []byte +} + +func (*SVCBECHConfig) Key() SVCBKey { return SVCB_ECHCONFIG } +func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) } +func (s *SVCBECHConfig) len() int { return len(s.ECH) } + +func (s *SVCBECHConfig) pack() ([]byte, error) { + return append([]byte(nil), s.ECH...), nil +} + +func (s *SVCBECHConfig) copy() SVCBKeyValue { + return &SVCBECHConfig{ + append([]byte(nil), s.ECH...), + } +} + +func (s *SVCBECHConfig) unpack(b []byte) error { + s.ECH = append([]byte(nil), b...) + return nil +} +func (s *SVCBECHConfig) parse(b string) error { + x, err := fromBase64([]byte(b)) + if err != nil { + return errors.New("dns: svcbechconfig: bad base64 echconfig") + } + s.ECH = x + return nil +} + +// SVCBIPv6Hint pair suggests an IPv6 address which may be used to open connections +// if A and AAAA record responses for SVCB's Target domain haven't been received. +// In that case, optionally, A and AAAA requests can be made, after which the +// connection to the hinted IP address may be terminated and a new connection may be opened. +// Basic use pattern for creating an ipv6hint option: +// +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBIPv6Hint) +// e.Hint = []net.IP{net.ParseIP("2001:db8::1")} +// h.Value = append(h.Value, e) +type SVCBIPv6Hint struct { + Hint []net.IP +} + +func (*SVCBIPv6Hint) Key() SVCBKey { return SVCB_IPV6HINT } +func (s *SVCBIPv6Hint) len() int { return 16 * len(s.Hint) } + +func (s *SVCBIPv6Hint) pack() ([]byte, error) { + b := make([]byte, 0, 16*len(s.Hint)) + for _, e := range s.Hint { + if len(e) != net.IPv6len || e.To4() != nil { + return nil, errors.New("dns: svcbipv6hint: expected ipv6, hint is ipv4") + } + b = append(b, e...) + } + return b, nil +} + +func (s *SVCBIPv6Hint) unpack(b []byte) error { + if len(b) == 0 || len(b)%16 != 0 { + return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16") + } + x := make([]net.IP, 0, len(b)/16) + for i := 0; i < len(b); i += 16 { + ip := net.IP(b[i : i+16]) + if ip.To4() != nil { + return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4") + } + x = append(x, ip) + } + s.Hint = x + return nil +} + +func (s *SVCBIPv6Hint) String() string { + str := make([]string, len(s.Hint)) + for i, e := range s.Hint { + if x := e.To4(); x != nil { + return "" + } + str[i] = e.String() + } + return strings.Join(str, ",") +} + +func (s *SVCBIPv6Hint) parse(b string) error { + if strings.Contains(b, ".") { + return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4") + } + str := strings.Split(b, ",") + dst := make([]net.IP, len(str)) + for i, e := range str { + ip := net.ParseIP(e) + if ip == nil { + return errors.New("dns: svcbipv6hint: bad ip") + } + dst[i] = ip + } + s.Hint = dst + return nil +} + +func (s *SVCBIPv6Hint) copy() SVCBKeyValue { + return &SVCBIPv6Hint{ + append([]net.IP(nil), s.Hint...), + } +} + +// SVCBLocal pair is intended for experimental/private use. The key is recommended +// to be in the range [SVCB_PRIVATE_LOWER, SVCB_PRIVATE_UPPER]. 
+// Basic use pattern for creating a keyNNNNN option: +// +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBLocal) +// e.KeyCode = 65400 +// e.Data = []byte("abc") +// h.Value = append(h.Value, e) +type SVCBLocal struct { + KeyCode SVCBKey // Never 65535 or any assigned keys. + Data []byte // All byte sequences are allowed. +} + +func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode } +func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil } +func (s *SVCBLocal) len() int { return len(s.Data) } + +func (s *SVCBLocal) unpack(b []byte) error { + s.Data = append([]byte(nil), b...) + return nil +} + +func (s *SVCBLocal) String() string { + var str strings.Builder + str.Grow(4 * len(s.Data)) + for _, e := range s.Data { + if ' ' <= e && e <= '~' { + switch e { + case '"', ';', ' ', '\\': + str.WriteByte('\\') + str.WriteByte(e) + default: + str.WriteByte(e) + } + } else { + str.WriteString(escapeByte(e)) + } + } + return str.String() +} + +func (s *SVCBLocal) parse(b string) error { + data := make([]byte, 0, len(b)) + for i := 0; i < len(b); { + if b[i] != '\\' { + data = append(data, b[i]) + i++ + continue + } + if i+1 == len(b) { + return errors.New("dns: svcblocal: svcb private/experimental key escape unterminated") + } + if isDigit(b[i+1]) { + if i+3 < len(b) && isDigit(b[i+2]) && isDigit(b[i+3]) { + a, err := strconv.ParseUint(b[i+1:i+4], 10, 8) + if err == nil { + i += 4 + data = append(data, byte(a)) + continue + } + } + return errors.New("dns: svcblocal: svcb private/experimental key bad escaped octet") + } else { + data = append(data, b[i+1]) + i += 2 + } + } + s.Data = data + return nil +} + +func (s *SVCBLocal) copy() SVCBKeyValue { + return &SVCBLocal{s.KeyCode, + append([]byte(nil), s.Data...), + } +} + +func (rr *SVCB) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + sprintName(rr.Target) + for _, e := range rr.Value { + s += " " + e.Key().String() + "=\"" + e.String() + "\"" + } + return s +} + +// areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their +// copies. arrA and arrB have equal lengths, otherwise zduplicate.go wouldn't call this function. +func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool { + a = append([]SVCBKeyValue(nil), a...) + b = append([]SVCBKeyValue(nil), b...) + sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() }) + sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() }) + for i, e := range a { + if e.Key() != b[i].Key() { + return false + } + b1, err1 := e.pack() + b2, err2 := b[i].pack() + if err1 != nil || err2 != nil || !bytes.Equal(b1, b2) { + return false + } + } + return true +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go index c207d616d90e1..59904dd6a090a 100644 --- a/vendor/github.com/miekg/dns/tsig.go +++ b/vendor/github.com/miekg/dns/tsig.go @@ -2,7 +2,6 @@ package dns import ( "crypto/hmac" - "crypto/md5" "crypto/sha1" "crypto/sha256" "crypto/sha512" @@ -16,12 +15,13 @@ import ( // HMAC hashing codes. These are transmitted as domain names. const ( - HmacMD5 = "hmac-md5.sig-alg.reg.int." HmacSHA1 = "hmac-sha1." HmacSHA224 = "hmac-sha224." HmacSHA256 = "hmac-sha256." HmacSHA384 = "hmac-sha384." HmacSHA512 = "hmac-sha512." + + HmacMD5 = "hmac-md5.sig-alg.reg.int." // Deprecated: HmacMD5 is no longer supported. ) // TSIG is the RR the holds the transaction signature of a message. 
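Since hmac-md5 support is removed above, a brief sketch (not part of the patch) of signing a query with one of the remaining algorithms; the key name, base64 secret and server address are placeholders.

package main

import (
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.TsigSecret = map[string]string{"tsig-key.example.org.": "c2VjcmV0c2VjcmV0c2VjcmV0"} // base64-encoded secret

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	m.SetTsig("tsig-key.example.org.", dns.HmacSHA256, 300, time.Now().Unix())

	// HmacMD5 is still defined but deprecated; TsigGenerate and tsigVerify no longer accept it.
	_, _, _ = c.Exchange(m, "127.0.0.1:53")
}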
@@ -121,8 +121,6 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s t := new(TSIG) var h hash.Hash switch CanonicalName(rr.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) case HmacSHA1: h = hmac.New(sha1.New, rawsecret) case HmacSHA224: @@ -185,8 +183,6 @@ func tsigVerify(msg []byte, secret, requestMAC string, timersOnly bool, now uint var h hash.Hash switch CanonicalName(tsig.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) case HmacSHA1: h = hmac.New(sha1.New, rawsecret) case HmacSHA224: diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 7776b4f066697..1f385bd229bfd 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -81,6 +81,8 @@ const ( TypeCDNSKEY uint16 = 60 TypeOPENPGPKEY uint16 = 61 TypeCSYNC uint16 = 62 + TypeSVCB uint16 = 64 + TypeHTTPS uint16 = 65 TypeSPF uint16 = 99 TypeUINFO uint16 = 100 TypeUID uint16 = 101 diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 26403f301ad1d..5c75851b41dc1 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 31} +var Version = v{1, 1, 35} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index d7ec2d974370f..0d3b34bd9b24f 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -402,6 +402,27 @@ func (r1 *HIP) isDuplicate(_r2 RR) bool { return true } +func (r1 *HTTPS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*HTTPS) + if !ok { + return false + } + _ = r2 + if r1.Priority != r2.Priority { + return false + } + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + if len(r1.Value) != len(r2.Value) { + return false + } + if !areSVCBPairArraysEqual(r1.Value, r2.Value) { + return false + } + return true +} + func (r1 *KEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*KEY) if !ok { @@ -1076,6 +1097,27 @@ func (r1 *SSHFP) isDuplicate(_r2 RR) bool { return true } +func (r1 *SVCB) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SVCB) + if !ok { + return false + } + _ = r2 + if r1.Priority != r2.Priority { + return false + } + if !isDuplicateName(r1.Target, r2.Target) { + return false + } + if len(r1.Value) != len(r2.Value) { + return false + } + if !areSVCBPairArraysEqual(r1.Value, r2.Value) { + return false + } + return true +} + func (r1 *TA) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*TA) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 02a5dfa4a2349..d24a10fa24267 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -316,6 +316,22 @@ func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bo return off, nil } +func (rr *HTTPS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataSVCB(rr.Value, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, 
err = packUint16(rr.Flags, msg, off) if err != nil { @@ -906,6 +922,22 @@ func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *SVCB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataSVCB(rr.Value, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.KeyTag, msg, off) if err != nil { @@ -1559,6 +1591,31 @@ func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Value, off, err = unpackDataSVCB(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart @@ -2461,6 +2518,31 @@ func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *SVCB) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Value, off, err = unpackDataSVCB(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 5bb59fa601195..11b51bf217109 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -33,6 +33,7 @@ var TypeToRR = map[uint16]func() RR{ TypeGPOS: func() RR { return new(GPOS) }, TypeHINFO: func() RR { return new(HINFO) }, TypeHIP: func() RR { return new(HIP) }, + TypeHTTPS: func() RR { return new(HTTPS) }, TypeKEY: func() RR { return new(KEY) }, TypeKX: func() RR { return new(KX) }, TypeL32: func() RR { return new(L32) }, @@ -70,6 +71,7 @@ var TypeToRR = map[uint16]func() RR{ TypeSPF: func() RR { return new(SPF) }, TypeSRV: func() RR { return new(SRV) }, TypeSSHFP: func() RR { return new(SSHFP) }, + TypeSVCB: func() RR { return new(SVCB) }, TypeTA: func() RR { return new(TA) }, TypeTALINK: func() RR { return new(TALINK) }, TypeTKEY: func() RR { return new(TKEY) }, @@ -110,6 +112,7 @@ var TypeToString = map[uint16]string{ TypeGPOS: "GPOS", TypeHINFO: "HINFO", TypeHIP: "HIP", + TypeHTTPS: "HTTPS", TypeISDN: "ISDN", TypeIXFR: "IXFR", TypeKEY: "KEY", @@ -153,6 +156,7 @@ var TypeToString = map[uint16]string{ TypeSPF: "SPF", TypeSRV: "SRV", TypeSSHFP: "SSHFP", + TypeSVCB: "SVCB", TypeTA: "TA", TypeTALINK: "TALINK", TypeTKEY: "TKEY", @@ -191,6 +195,7 @@ func (rr *GID) Header() *RR_Header { return &rr.Hdr } func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } func (rr 
*HINFO) Header() *RR_Header { return &rr.Hdr } func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr } func (rr *KEY) Header() *RR_Header { return &rr.Hdr } func (rr *KX) Header() *RR_Header { return &rr.Hdr } func (rr *L32) Header() *RR_Header { return &rr.Hdr } @@ -229,6 +234,7 @@ func (rr *SOA) Header() *RR_Header { return &rr.Hdr } func (rr *SPF) Header() *RR_Header { return &rr.Hdr } func (rr *SRV) Header() *RR_Header { return &rr.Hdr } func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *SVCB) Header() *RR_Header { return &rr.Hdr } func (rr *TA) Header() *RR_Header { return &rr.Hdr } func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } @@ -592,6 +598,15 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l += len(rr.FingerPrint) / 2 return l } +func (rr *SVCB) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += 2 // Priority + l += domainNameLen(rr.Target, off+l, compression, false) + for _, x := range rr.Value { + l += 4 + int(x.len()) + } + return l +} func (rr *TA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -753,6 +768,9 @@ func (rr *HIP) copy() RR { copy(RendezvousServers, rr.RendezvousServers) return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} } +func (rr *HTTPS) copy() RR { + return &HTTPS{*rr.SVCB.copy().(*SVCB)} +} func (rr *KEY) copy() RR { return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} } @@ -879,6 +897,13 @@ func (rr *SRV) copy() RR { func (rr *SSHFP) copy() RR { return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} } +func (rr *SVCB) copy() RR { + Value := make([]SVCBKeyValue, len(rr.Value)) + for i, e := range rr.Value { + Value[i] = e.copy() + } + return &SVCB{rr.Hdr, rr.Priority, rr.Target, Value} +} func (rr *TA) copy() RR { return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} } diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml index b122a8e3d9f82..5e31a95a8b8ef 100644 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - "1.11.x" + - "1.14.x" - tip script: diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 0a21e2cd1b09a..20eea2b7ade15 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,24 @@ +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. 
[GH-185] + ## 1.2.2 * Do not add unsettable (unexported) values to the unused metadata key diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index daea3318e03c1..f41bcc58fbb17 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -100,6 +100,47 @@ // "address": "123 Maple St.", // } // +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// // Other Configuration // // mapstructure is highly configurable. See the DecoderConfig struct @@ -378,6 +419,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e var err error outputKind := getKind(outVal) + addMetaKey := true switch outputKind { case reflect.Bool: err = d.decodeBool(name, input, outVal) @@ -396,7 +438,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e case reflect.Map: err = d.decodeMap(name, input, outVal) case reflect.Ptr: - err = d.decodePtr(name, input, outVal) + addMetaKey, err = d.decodePtr(name, input, outVal) case reflect.Slice: err = d.decodeSlice(name, input, outVal) case reflect.Array: @@ -410,7 +452,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // If we reached here, then we successfully decoded SOMETHING, so // mark the key as used if we're tracking metainput. - if d.config.Metadata != nil && name != "" { + if addMetaKey && d.config.Metadata != nil && name != "" { d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } @@ -421,7 +463,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. 
We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil } dataVal := reflect.ValueOf(data) @@ -798,30 +867,31 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") + keyName := f.Name + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous // Determine the name of the key in the map - keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { continue } - keyName = tagParts[0] - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct - // If "squash" is specified in the tag, we squash the field down. - if !squash { - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue } + + // If "squash" is specified in the tag, we squash the field down. + squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 if squash && v.Kind() != reflect.Struct { return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } + keyName = tagValue[:index] + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue } switch v.Kind() { @@ -836,11 +906,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re mType := reflect.MapOf(vKeyType, vElemType) vMap := reflect.MakeMap(mType) - err := d.decode(keyName, x.Interface(), vMap) + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) if err != nil { return err } + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + if squash { for _, k := range vMap.MapKeys() { valMap.SetMapIndex(k, vMap.MapIndex(k)) @@ -861,7 +942,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return nil } -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { // If the input data is nil, then we want to just set the output // pointer to be nil as well. 
isNil := data == nil @@ -882,7 +963,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er val.Set(nilValue) } - return nil + return true, nil } // Create an element of the concrete (non pointer) type and decode @@ -896,16 +977,16 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er } if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err + return false, err } val.Set(realVal) } else { if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return err + return false, err } } - return nil + return false, nil } func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { @@ -1084,13 +1165,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Not the most efficient way to do this but we can optimize later if // we want to. To convert from struct to struct we go to map first // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { return err } - result := d.decodeStructFromMap(name, mval, val) + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) return result default: @@ -1144,7 +1235,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e fieldKind := fieldType.Type.Kind() // If "squash" is specified in the tag, we squash the field down. 
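A compact sketch (not part of the vendored code) of the ",omitempty" behaviour documented above when decoding a struct into a map; the struct and field names are made up for illustration.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Source struct {
	Name string `mapstructure:"name"`
	Age  int    `mapstructure:"age,omitempty"` // zero values are skipped when encoding to a map
}

func main() {
	out := map[string]interface{}{}
	if err := mapstructure.Decode(Source{Name: "loki"}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[name:loki]; "age" is omitted because it is the zero value
}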
- squash := d.config.Squash && fieldKind == reflect.Struct + squash := d.config.Squash && fieldKind == reflect.Struct && fieldType.Anonymous remain := false // We always parse the tags cause we're looking for other tags too @@ -1172,9 +1263,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } // Build our field - fieldCurrent := field{fieldType, structVal.Field(i)} if remain { - remainField = &fieldCurrent + remainField = &field{fieldType, structVal.Field(i)} } else { // Normal struct field, store it away fields = append(fields, field{fieldType, structVal.Field(i)}) @@ -1293,6 +1383,24 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e return nil } +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func getKind(val reflect.Value) reflect.Kind { kind := val.Kind() diff --git a/vendor/github.com/prometheus/alertmanager/api/v1/api.go b/vendor/github.com/prometheus/alertmanager/api/v1/api.go index 3a1466439cf57..0a1df61b1d448 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v1/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v1/api.go @@ -662,7 +662,7 @@ func matchFilterLabels(matchers []*labels.Matcher, sms map[string]string) bool { if string(m.Value) == "" && !prs { continue } - if !prs || !m.Matches(string(v)) { + if !m.Matches(string(v)) { return false } } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/api.go b/vendor/github.com/prometheus/alertmanager/api/v2/api.go index 6cde1972fddb3..24f308d5428b2 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/api.go @@ -551,7 +551,7 @@ func matchFilterLabels(matchers []*labels.Matcher, sms map[string]string) bool { if m.Value == "" && !prs { continue } - if !prs || !m.Matches(v) { + if !m.Matches(v) { return false } } diff --git a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go index 1e0e0c69f4632..cdf216af6eb7a 100644 --- a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go +++ b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go @@ -37,9 +37,9 @@ var Assets = func() http.FileSystem { "/static/index.html": &vfsgen۰CompressedFileInfo{ name: "index.html", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 1314, + uncompressedSize: 1381, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x4f\x4f\xdc\x3e\x10\xbd\xf3\x29\x86\x5c\x92\xd5\x6f\x37\x11\xb7\x9f\xb6\xc9\x4a\x08\x50\x45\xa5\x96\x4a\x70\xe9\x71\x88\x27\x89\x8b\xd7\xb6\xc6\x13\x28\xaa\xf8\xee\x95\x93\x8a\x25\xd9\xd0\xd6\x97\xf8\xcf\xbc\x97\x37\xcf\x33\x2e\x4f\x2f\x6f\x2e\xee\xbe\x7d\xbd\x82\x4e\xf6\x66\x77\x52\xc6\x0f\x18\xb4\x6d\x95\x90\x4d\x76\x27\x00\x00\x65\x47\xa8\xc6\xe9\xb0\xdc\x93\x20\xd4\x1d\x72\x20\xa9\x92\x5e\x9a\xcd\xff\xc9\xfc\xd8\xe2\x9e\xaa\xe4\x51\xd3\x93\x77\x2c\x09\xd4\xce\x0a\x59\xa9\x92\x27\xad\xa4\xab\x14\x3d\xea\x9a\x36\xc3\x62\x0d\xda\x6a\xd1\x68\x36\xa1\x46\x43\xd5\xd9\x1a\x42\xc7\xda\x3e\x6c\xc4\x6d\x1a\x2d\x95\x75\x6f\xe9\x45\x8b\xa1\xdd\xb9\x21\x96\x3d\x5a\x6c\x89\xcb\x62\xdc\x1b\xc5\x16\x07\xb5\xe5\xbd\x53\xcf\x6f\xa0\xa1\x66\xed\xe5\xb0\x11\x47\x51\xc0\x75\x03\xd2\x11\x13\xe8\x00\xd6\x81\x30\x6a\xa3\x6d\x0b\xc1\x60\xe8\x00\x25\x9e\x02\x59\x05\x6e\x08\x04\x8f\xd2\x81\xb6\xc3\xbc\x67\xb3\x9e\xf3\xa1\x52\xe0\x2c\xe5\x70\xd7\xe9\x00\x64\x43\xcf\x14\x00\x43\x20\x09\x60\xf4\x03\xc1\x28\x24\xff\x1e\x00\x99\xc0\x38\x54\xa4\xc0\xb3\xf3\xc4\xe6\x79\x42\xa7\x1b\xc8\x8c\xab\x51\xb4\xb3\x79\xfc\x71\x34\x36\x0f\xfd\x7d\x10\xce\x36\x67\x2b\x38\xad\x20\x2d\xd2\x15\xfc\x9c\xc0\xe2\x38\x82\x41\xb5\xb0\xf7\x5f\x84\x7f\x38\x02\xd7\xce\x06\x67\x28\x37\xae\xcd\x52\x54\x51\xdf\x60\x47\xba\x9a\xc6\xbe\x1c\xdc\x2d\xe6\xf6\xfe\xf6\x1b\x02\xd7\x55\xf2\x9a\x72\xb2\x7b\x37\x72\x7a\x33\x8f\xc8\x80\xde\x43\x05\x57\x66\x9f\x7f\x46\x6d\xf3\x58\x28\xd9\x71\xa6\x8d\xc1\x36\x6c\x17\x2c\x88\xc3\xb3\x53\x7d\x1d\x93\xde\x82\x70\x4f\xeb\xc5\x28\x45\x0d\xf6\x46\x2e\x98\x50\x1c\x6f\x07\xa3\xcc\xad\x38\xc6\x96\xf2\x96\xe4\x5a\x68\x9f\xa5\xd3\xa8\x74\xb5\xcc\xd5\xb2\xeb\xfd\xd5\x0f\x8f\x56\x9d\x1b\xb3\x85\x4f\xb7\x37\x5f\x72\x1f\xdb\x25\x5b\xa6\x9d\x02\xd2\xd5\xea\x88\xf6\x65\x6a\xfa\xec\x12\xd0\xfb\x3c\xb6\x59\xc8\x3d\x71\xd0\x41\x2e\x27\x3a\x87\x72\xa9\x59\xdf\x53\xd6\xf4\x76\xb0\x22\x8b\x77\xff\x5e\xd1\xbc\x0a\x0c\xef\xe4\xbd\x1e\xda\x7b\x5e\x09\x7f\x13\xf5\x71\x92\xe5\x92\x28\x1a\x0e\x49\xfd\xbb\xb0\x99\x73\xeb\xd1\xeb\x20\xac\x6d\xab\x9b\xe7\x03\xe3\x1f\xc4\x4e\xcb\xb1\x2c\xc6\x47\xa3\x2c\xc6\x17\xf1\x57\x00\x00\x00\xff\xff\x58\x9a\x7f\x3b\x22\x05\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\xc1\x6e\xdb\x38\x10\xbd\xe7\x2b\x26\xbc\x58\xc6\x5a\x12\x72\x5b\x78\x45\x03\x41\x12\x2c\x52\xa0\x4d\x81\xe4\xd2\xe3\x44\x1c\x49\x6c\x28\x92\x20\x47\x4e\x8c\x22\xff\x5e\x50\x2a\xe2\xc8\x76\xda\xf2\x62\x71\x38\xf3\xfc\xde\x9b\x21\xab\xf3\xeb\xbb\xab\x87\x6f\x5f\x6f\xa0\xe3\xde\x6c\xce\xaa\xf4\x03\x06\x6d\x2b\x05\x59\xb1\x39\x03\x00\xa8\x3a\x42\x35\x7d\x8e\xdb\x9e\x18\xa1\xee\x30\x44\x62\x29\x06\x6e\xf2\x7f\xc5\xe1\xb1\xc5\x9e\xa4\xd8\x6a\x7a\xf6\x2e\xb0\x80\xda\x59\x26\xcb\x52\x3c\x6b\xc5\x9d\x54\xb4\xd5\x35\xe5\xe3\x66\x05\xda\x6a\xd6\x68\xf2\x58\xa3\x21\x79\xb1\x82\xd8\x05\x6d\x9f\x72\x76\x79\xa3\x59\x5a\xf7\x1e\xde\x68\xfb\x04\x81\x8c\x14\xba\x76\x56\x00\xef\x3c\x49\xa1\x7b\x6c\xa9\x7c\xc9\xa7\x58\x17\xa8\x91\xa2\xc1\x6d\xda\x16\xba\x76\x02\xca\x77\x10\xac\xd9\xd0\xe6\xd2\x50\xe0\x1e\x2d\xb6\x14\xaa\x72\x8a\x4d\x7a\xcb\xbd\xe0\xea\xd1\xa9\xdd\xbb\xd2\x58\x07\xed\x79\x1f\x48\xab\x2c\xe1\xb6\x01\xee\x28\x10\xe8\x08\xd6\x01\x07\xd4\x46\xdb\x16\xa2\xc1\xd8\x01\x72\x3a\x05\xb2\x0a\xdc\x98\x08\x1e\xb9\x03\x6d\xc7\xef\x21\x98\xd5\x21\x1e\x2a\x05\xce\x52\x01\x0f\x9d\x8e\x40\x36\x0e\x81\x22\x60\x8c\xc4\x11\x8c\x7e\x22\x98\x88\x14\xdf\x23\x60\x20\x30\x0e\x15\x29\xf0\xc1\x79\x0a\x66\x37\x83\xd3\x0d\x64\xc6\xd5\xc8\xda\xd9\x22\xfd\x71\xea\x4d\x11\x87\xc7\xc8\x21\xcb\x2f\x96\x70\x2e\x61\x51\x2e\x96\xf0\x63\x56\x96\xd6\x51\x19\xc8\x13\xb1\x7f\x52\xf9\x7f\x47\xc5\xb5\xb3\xd1\x19\x2a\x8c\x6b\xb3\x05\xaa\xc4\x6f\xb4\x63\xb1\x9c\xe7\xbe\xee\xdd\x2d\x0f\xed\xfd\xe5\x37\xc4\x50\x4b\xf1\x26\x59\x6c\x3e\xcc\x9c\x77\x66\x8b\x01\xd0\x7b\x90\x70\x63\xfa\xe2\x33\x6a\x5b\xa4\x59\xcb\x8e\x95\x36\x06\xdb\xb8\x3e\x61\x41\x5a\x3e\x38\x35\xd4\x49\xf4\x1a\x38\x0c\xb4\x3a\x99\xa5\xa8\xc1\xc1\xf0\x55\x20\x64\x17\xd6\xa3\x51\xe6\x9e\x5d\xc0\x96\x8a\x96\xf8\x96\xa9\xcf\x16\xf3\xac\xc5\xf2\x34\x56\x1b\xdc\xe0\x6f\x5e\x3c\x5a\x75\x69\xcc\x1a\x3e\xdd\xdf\x7d\x29\x7c\xba\x71\xd9\x69\xd8\x79\xc1\x62\xb9\x3c\x82\x7d\x9d\x9b\x7e\xd0\x04\xf4\xbe\x48\x37\x35\x16\x9e\x42\xd4\x91\xaf\x67\x3c\xc7\x71\xa9\x83\x7e\xa4\xac\x19\xec\x68\x45\x96\x7a\xff\xd1\xd0\xbc\x11\x8c\x1f\xe8\x5e\x8d\x2f\xc4\xe1\x24\xfc\x89\xd4\xff\x33\x95\xa7\x48\xd1\x78\x48\xea\xef\x89\x1d\x38\xb7\x9a\xbc\x8e\x1c\xb4\x6d\x75\xb3\xdb\x23\xfe\x86\xec\x7c\x1c\xab\x72\x7a\x34\xaa\x72\x7a\x54\x7f\x06\x00\x00\xff\xff\x90\x80\x4f\x52\x65\x05\x00\x00"), }, "/static/lib": &vfsgen۰DirInfo{ name: "lib", diff --git a/vendor/github.com/prometheus/alertmanager/config/coordinator.go b/vendor/github.com/prometheus/alertmanager/config/coordinator.go index 0e6e00c52d879..e8fa7850358f2 100644 --- a/vendor/github.com/prometheus/alertmanager/config/coordinator.go +++ b/vendor/github.com/prometheus/alertmanager/config/coordinator.go @@ -17,7 +17,6 @@ import ( "crypto/md5" "encoding/binary" "sync" - "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -97,15 +96,10 @@ func (c *Coordinator) notifySubscribers() error { func (c *Coordinator) loadFromFile() error { conf, err := LoadFile(c.configFilePath) if err != nil { - c.configSuccessMetric.Set(0) return err } c.config = conf - c.configSuccessMetric.Set(1) - c.configSuccessTimeMetric.Set(float64(time.Now().Unix())) - hash := md5HashAsMetricValue([]byte(c.config.original)) - c.configHashMetric.Set(hash) return nil } @@ -126,6 +120,7 @@ func (c *Coordinator) Reload() error { "file", c.configFilePath, "err", err, ) + c.configSuccessMetric.Set(0) return err } level.Info(c.logger).Log( @@ -139,9 +134,15 @@ func (c *Coordinator) Reload() error { "file", c.configFilePath, 
"err", err, ) + c.configSuccessMetric.Set(0) return err } + c.configSuccessMetric.Set(1) + c.configSuccessTimeMetric.SetToCurrentTime() + hash := md5HashAsMetricValue([]byte(c.config.original)) + c.configHashMetric.Set(hash) + return nil } diff --git a/vendor/github.com/prometheus/alertmanager/notify/notify.go b/vendor/github.com/prometheus/alertmanager/notify/notify.go index f211685e1b7bc..0be8b488be0fc 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/notify.go +++ b/vendor/github.com/prometheus/alertmanager/notify/notify.go @@ -213,9 +213,11 @@ type NotificationLog interface { } type metrics struct { - numNotifications *prometheus.CounterVec - numFailedNotifications *prometheus.CounterVec - notificationLatencySeconds *prometheus.HistogramVec + numNotifications *prometheus.CounterVec + numTotalFailedNotifications *prometheus.CounterVec + numNotificationRequestsTotal *prometheus.CounterVec + numNotificationRequestsFailedTotal *prometheus.CounterVec + notificationLatencySeconds *prometheus.HistogramVec } func newMetrics(r prometheus.Registerer) *metrics { @@ -225,11 +227,21 @@ func newMetrics(r prometheus.Registerer) *metrics { Name: "notifications_total", Help: "The total number of attempted notifications.", }, []string{"integration"}), - numFailedNotifications: prometheus.NewCounterVec(prometheus.CounterOpts{ + numTotalFailedNotifications: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "alertmanager", Name: "notifications_failed_total", Help: "The total number of failed notifications.", }, []string{"integration"}), + numNotificationRequestsTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "alertmanager", + Name: "notification_requests_total", + Help: "The total number of attempted notification requests.", + }, []string{"integration"}), + numNotificationRequestsFailedTotal: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "alertmanager", + Name: "notification_requests_failed_total", + Help: "The total number of failed notification requests.", + }, []string{"integration"}), notificationLatencySeconds: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "alertmanager", Name: "notification_latency_seconds", @@ -248,10 +260,16 @@ func newMetrics(r prometheus.Registerer) *metrics { "victorops", } { m.numNotifications.WithLabelValues(integration) - m.numFailedNotifications.WithLabelValues(integration) + m.numTotalFailedNotifications.WithLabelValues(integration) + m.numNotificationRequestsTotal.WithLabelValues(integration) + m.numNotificationRequestsFailedTotal.WithLabelValues(integration) m.notificationLatencySeconds.WithLabelValues(integration) } - r.MustRegister(m.numNotifications, m.numFailedNotifications, m.notificationLatencySeconds) + r.MustRegister( + m.numNotifications, m.numTotalFailedNotifications, + m.numNotificationRequestsTotal, m.numNotificationRequestsFailedTotal, + m.notificationLatencySeconds, + ) return m } @@ -389,7 +407,7 @@ func NewGossipSettleStage(p *cluster.Peer) *GossipSettleStage { return &GossipSettleStage{peer: p} } -func (n *GossipSettleStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { +func (n *GossipSettleStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { if n.peer != nil { n.peer.WaitReady() } @@ -407,7 +425,7 @@ func NewMuteStage(m types.Muter) *MuteStage { } // Exec implements the Stage interface. 
-func (n *MuteStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { +func (n *MuteStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { var filtered []*types.Alert for _, a := range alerts { // TODO(fabxc): increment total alerts counter. @@ -434,7 +452,7 @@ func NewWaitStage(wait func() time.Duration) *WaitStage { } // Exec implements the Stage interface. -func (ws *WaitStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { +func (ws *WaitStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { select { case <-time.After(ws.wait()): case <-ctx.Done(): @@ -541,7 +559,7 @@ func (n *DedupStage) needsUpdate(entry *nflogpb.Entry, firing, resolved map[uint } // Exec implements the Stage interface. -func (n *DedupStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { +func (n *DedupStage) Exec(ctx context.Context, _ log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { gkey, ok := GroupKey(ctx) if !ok { return ctx, nil, errors.New("group key missing") @@ -609,8 +627,16 @@ func NewRetryStage(i Integration, groupName string, metrics *metrics) *RetryStag } } -// Exec implements the Stage interface. func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { + r.metrics.numNotifications.WithLabelValues(r.integration.Name()).Inc() + ctx, alerts, err := r.exec(ctx, l, alerts...) + if err != nil { + r.metrics.numTotalFailedNotifications.WithLabelValues(r.integration.Name()).Inc() + } + return ctx, alerts, err +} + +func (r RetryStage) exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { var sent []*types.Alert // If we shouldn't send notifications for resolved alerts, but there are only @@ -663,9 +689,9 @@ func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Ale now := time.Now() retry, err := r.integration.Notify(ctx, sent...) 
r.metrics.notificationLatencySeconds.WithLabelValues(r.integration.Name()).Observe(time.Since(now).Seconds()) - r.metrics.numNotifications.WithLabelValues(r.integration.Name()).Inc() + r.metrics.numNotificationRequestsTotal.WithLabelValues(r.integration.Name()).Inc() if err != nil { - r.metrics.numFailedNotifications.WithLabelValues(r.integration.Name()).Inc() + r.metrics.numNotificationRequestsFailedTotal.WithLabelValues(r.integration.Name()).Inc() if !retry { return ctx, alerts, errors.Wrapf(err, "%s/%s: notify retry canceled due to unrecoverable error after %d attempts", r.groupName, r.integration.String(), i) } diff --git a/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go b/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go index 7d85392bb1f95..a145135bba9cd 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go +++ b/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go @@ -215,8 +215,8 @@ func (n *Notifier) notifyV2( RoutingKey: tmpl(string(n.conf.RoutingKey)), EventAction: eventType, DedupKey: key.Hash(), - Images: make([]pagerDutyImage, len(n.conf.Images)), - Links: make([]pagerDutyLink, len(n.conf.Links)), + Images: make([]pagerDutyImage, 0, len(n.conf.Images)), + Links: make([]pagerDutyLink, 0, len(n.conf.Links)), Payload: &pagerDutyPayload{ Summary: summary, Source: tmpl(n.conf.Client), @@ -228,15 +228,27 @@ func (n *Notifier) notifyV2( }, } - for index, item := range n.conf.Images { - msg.Images[index].Src = tmpl(item.Src) - msg.Images[index].Alt = tmpl(item.Alt) - msg.Images[index].Href = tmpl(item.Href) + for _, item := range n.conf.Images { + image := pagerDutyImage{ + Src: tmpl(item.Src), + Alt: tmpl(item.Alt), + Href: tmpl(item.Href), + } + + if image.Src != "" { + msg.Images = append(msg.Images, image) + } } - for index, item := range n.conf.Links { - msg.Links[index].HRef = tmpl(item.Href) - msg.Links[index].Text = tmpl(item.Text) + for _, item := range n.conf.Links { + link := pagerDutyLink{ + HRef: tmpl(item.Href), + Text: tmpl(item.Text), + } + + if link.HRef != "" { + msg.Links = append(msg.Links, link) + } } if tmplErr != nil { diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 13b36464d976c..138a33b7dad97 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -117,8 +117,6 @@ func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { } const ( - statusAPIError = 422 - apiPrefix = "/api/v1" epAlerts = apiPrefix + "/alerts" @@ -138,6 +136,7 @@ const ( epConfig = apiPrefix + "/status/config" epFlags = apiPrefix + "/status/flags" epRuntimeinfo = apiPrefix + "/status/runtimeinfo" + epTSDB = apiPrefix + "/status/tsdb" ) // AlertState models the state of an alert. @@ -254,6 +253,8 @@ type API interface { TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) // Metadata returns metadata about metrics currently scraped by the metric name. Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) + // TSDB returns the cardinality statistics. + TSDB(ctx context.Context) (TSDBResult, error) } // AlertsResult contains the result from querying the alerts endpoint. 
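
The client_golang hunks above and below add a TSDB() method to the v1 API interface (backed by the new /api/v1/status/tsdb endpoint) and drop the hard-coded 422 constant in favour of http.StatusUnprocessableEntity. As a rough, non-authoritative sketch of how a caller would use the new method, assuming a Prometheus server at a placeholder address (TSDBResult and Stat are the types defined in the next hunk):

```go
// Sketch only: exercises the v1.API TSDB() method added in this vendor bump.
// The Prometheus address is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatal(err)
	}
	papi := v1.NewAPI(client)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// TSDB hits /api/v1/status/tsdb and returns cardinality statistics.
	stats, err := papi.TSDB(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats.SeriesCountByMetricName {
		fmt.Printf("%s: %d series\n", s.Name, s.Value)
	}
}
```
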
@@ -282,18 +283,18 @@ type FlagsResult map[string]string // RuntimeinfoResult contains the result from querying the runtimeinfo endpoint. type RuntimeinfoResult struct { - StartTime string `json:"startTime"` - CWD string `json:"CWD"` - ReloadConfigSuccess bool `json:"reloadConfigSuccess"` - LastConfigTime string `json:"lastConfigTime"` - ChunkCount int `json:"chunkCount"` - TimeSeriesCount int `json:"timeSeriesCount"` - CorruptionCount int `json:"corruptionCount"` - GoroutineCount int `json:"goroutineCount"` - GOMAXPROCS int `json:"GOMAXPROCS"` - GOGC string `json:"GOGC"` - GODEBUG string `json:"GODEBUG"` - StorageRetention string `json:"storageRetention"` + StartTime time.Time `json:"startTime"` + CWD string `json:"CWD"` + ReloadConfigSuccess bool `json:"reloadConfigSuccess"` + LastConfigTime time.Time `json:"lastConfigTime"` + ChunkCount int `json:"chunkCount"` + TimeSeriesCount int `json:"timeSeriesCount"` + CorruptionCount int `json:"corruptionCount"` + GoroutineCount int `json:"goroutineCount"` + GOMAXPROCS int `json:"GOMAXPROCS"` + GOGC string `json:"GOGC"` + GODEBUG string `json:"GODEBUG"` + StorageRetention string `json:"storageRetention"` } // SnapshotResult contains the result from querying the snapshot endpoint. @@ -404,6 +405,20 @@ type queryResult struct { v model.Value } +// TSDBResult contains the result from querying the tsdb endpoint. +type TSDBResult struct { + SeriesCountByMetricName []Stat `json:"seriesCountByMetricName"` + LabelValueCountByLabelName []Stat `json:"labelValueCountByLabelName"` + MemoryInBytesByLabelName []Stat `json:"memoryInBytesByLabelName"` + SeriesCountByLabelValuePair []Stat `json:"seriesCountByLabelValuePair"` +} + +// Stat models information about statistic value. +type Stat struct { + Name string `json:"name"` + Value uint64 `json:"value"` +} + func (rg *RuleGroup) UnmarshalJSON(b []byte) error { v := struct { Name string `json:"name"` @@ -883,6 +898,24 @@ func (h *httpAPI) Metadata(ctx context.Context, metric string, limit string) (ma return res, json.Unmarshal(body, &res) } +func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { + u := h.client.URL(epTSDB, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return TSDBResult{}, err + } + + _, body, _, err := h.client.Do(ctx, req) + if err != nil { + return TSDBResult{}, err + } + + var res TSDBResult + return res, json.Unmarshal(body, &res) + +} + // Warnings is an array of non critical errors type Warnings []string @@ -908,7 +941,7 @@ type apiResponse struct { func apiError(code int) bool { // These are the codes that Prometheus sends when it returns an error. - return code == statusAPIError || code == http.StatusBadRequest + return code == http.StatusUnprocessableEntity || code == http.StatusBadRequest } func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) { @@ -971,7 +1004,8 @@ func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Respon } -// DoGetFallback will attempt to do the request as-is, and on a 405 it will fallback to a GET request. +// DoGetFallback will attempt to do the request as-is, and on a 405 or 501 it +// will fallback to a GET request. func (h *apiClientImpl) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) { req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode())) if err != nil { @@ -980,7 +1014,7 @@ func (h *apiClientImpl) DoGetFallback(ctx context.Context, u *url.URL, args url. 
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, body, warnings, err := h.Do(ctx, req) - if resp != nil && resp.StatusCode == http.StatusMethodNotAllowed { + if resp != nil && (resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotImplemented) { u.RawQuery = args.Encode() req, err = http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 0e1b48c03f142..3f8fd790d66ab 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -163,7 +163,7 @@ func (c *counter) updateExemplar(v float64, l Labels) { // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. type CounterVec struct { - *metricVec + *MetricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and @@ -176,11 +176,11 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { opts.ConstLabels, ) return &CounterVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. return result }), @@ -188,7 +188,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { } // GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of +// values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Counter is created. // // It is possible to call this method without using the returned Counter to only @@ -202,7 +202,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { // Counter with the same label values is created later. // // An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). +// number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as @@ -211,7 +211,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { // with a performance overhead (for creating and processing the Labels map). // See also the GaugeVec example. func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } @@ -219,19 +219,19 @@ func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { } // GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is +// must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Counter is created. 
Implications of // creating a Counter without using it and keeping the Counter for later use are // the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). +// with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.metricVec.getMetricWith(labels) + metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Counter), err } @@ -275,7 +275,7 @@ func (v *CounterVec) With(labels Labels) Counter { // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.curryWith(labels) + vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &CounterVec{vec}, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 2f19f5e1e7eaf..957d93a2dbb9c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -51,7 +51,7 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric + // variableLabels contains names of labels for which the metric // maintains variable values. variableLabels []string // id is a hash of the values of the ConstLabels and fqName. This diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index d67573f767a96..bd0733d6a7d6b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -132,7 +132,7 @@ func (g *gauge) Write(out *dto.Metric) error { // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { - *metricVec + *MetricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and @@ -145,11 +145,11 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { opts.ConstLabels, ) return &GaugeVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } - result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. return result }), @@ -157,7 +157,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { } // GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of +// values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Gauge is created. 
// // It is possible to call this method without using the returned Gauge to only @@ -172,7 +172,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { // example. // // An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). +// number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as @@ -180,7 +180,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } @@ -188,19 +188,19 @@ func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { } // GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is +// must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Gauge is created. Implications of // creating a Gauge without using it and keeping the Gauge for later use are // the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). +// with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.metricVec.getMetricWith(labels) + metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Gauge), err } @@ -244,7 +244,7 @@ func (v *GaugeVec) With(labels Labels) Gauge { // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.curryWith(labels) + vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &GaugeVec{vec}, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ea05cf429f2ba..6f67d10464928 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -58,9 +58,10 @@ type goCollector struct { // collector will use the memstats from a previous collection if // runtime.ReadMemStats takes more than 1s. However, if there are no previously // collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. (The problem might be solved -// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go -// issue.) +// will block until runtime.ReadMemStats succeeds. +// +// NOTE: The problem is solved in Go 1.15, see +// https://github.com/golang/go/issues/19812 for the related Go issue. 
func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index d4ea301a33ce1..f71e286be55f3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -192,7 +192,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h := &histogram{ desc: desc, upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), counts: [2]*histogramCounts{{}, {}}, now: time.Now, } @@ -409,7 +409,7 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { - *metricVec + *MetricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and @@ -422,14 +422,14 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { opts.ConstLabels, ) return &HistogramVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { return newHistogram(desc, opts, lvs...) }), } } // GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of +// values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Histogram is created. // // It is possible to call this method without using the returned Histogram to only @@ -444,7 +444,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { // example. // // An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). +// number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as @@ -453,7 +453,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { // with a performance overhead (for creating and processing the Labels map). // See also the GaugeVec example. func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Observer), err } @@ -461,19 +461,19 @@ func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) } // GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is +// must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Histogram is created. Implications of // creating a Histogram without using it and keeping the Histogram for later use // are the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). +// with those of the variable labels in Desc (minus any curried labels). 
// // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) + metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Observer), err } @@ -517,7 +517,7 @@ func (v *HistogramVec) With(labels Labels) Observer { // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) + vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &HistogramVec{vec}, err } @@ -602,7 +602,7 @@ func NewConstHistogram( count: count, sum: sum, buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), }, nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 35bd8bde34c75..a2b80b1c19dac 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -89,7 +89,7 @@ type Opts struct { // better covered by target labels set by the scraping Prometheus // server, or by one specific metric (e.g. a build_info or a // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index f3c1440d1c65b..cf70071496794 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -110,7 +110,7 @@ type SummaryOpts struct { // better covered by target labels set by the scraping Prometheus // server, or by one specific metric (e.g. a build_info or a // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Objectives defines the quantile rank estimates with their respective @@ -208,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ desc: desc, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. @@ -221,7 +221,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), hotBuf: make([]float64, 0, opts.BufCap), coldBuf: make([]float64, 0, opts.BufCap), @@ -513,7 +513,7 @@ func (s quantSort) Less(i, j int) bool { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. 
type SummaryVec struct { - *metricVec + *MetricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and @@ -535,14 +535,14 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { + MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } // GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of +// values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Summary is created. // // It is possible to call this method without using the returned Summary to only @@ -557,7 +557,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { // example. // // An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). +// number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as @@ -566,7 +566,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { // with a performance overhead (for creating and processing the Labels map). // See also the GaugeVec example. func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Observer), err } @@ -574,19 +574,19 @@ func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { } // GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is +// must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Summary is created. Implications of // creating a Summary without using it and keeping the Summary for later use are // the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). +// with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) + metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Observer), err } @@ -630,7 +630,7 @@ func (v *SummaryVec) With(labels Labels) Observer { // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. 
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) + vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &SummaryVec{vec}, err } @@ -716,7 +716,7 @@ func NewConstSummary( count: count, sum: sum, quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), }, nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 6206928cc67c2..8304de47733d4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -63,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val desc: desc, valType: valueType, function: function, - labelPairs: makeLabelPairs(desc, nil), + labelPairs: MakeLabelPairs(desc, nil), } result.init(result) return result @@ -95,7 +95,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues desc: desc, valType: valueType, val: value, - labelPairs: makeLabelPairs(desc, labelValues), + labelPairs: MakeLabelPairs(desc, labelValues), }, nil } @@ -145,7 +145,14 @@ func populateMetric( return nil } -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { +// MakeLabelPairs is a helper function to create protobuf LabelPairs from the +// variable and constant labels in the provided Desc. The values for the +// variable labels are defined by the labelValues slice, which must be in the +// same order as the corresponding variable labels in the Desc. +// +// This function is only needed for custom Metric implementations. See MetricVec +// example. +func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index d53848dc48100..6ba49d85bdab9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,12 +20,20 @@ import ( "github.com/prometheus/common/model" ) -// metricVec is a Collector to bundle metrics of the same name that differ in -// their label values. metricVec is not used directly (and therefore -// unexported). It is used as a building block for implementations of vectors of -// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. -// It also handles label currying. -type metricVec struct { +// MetricVec is a Collector to bundle metrics of the same name that differ in +// their label values. MetricVec is not used directly but as a building block +// for implementations of vectors of a given metric type, like GaugeVec, +// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be +// used for custom Metric implementations. +// +// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in +// FooVec and initialize it with NewMetricVec. Implement wrappers for +// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather +// than (Metric, error). Similarly, create a wrapper for CurryWith that returns +// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also +// add the convenience methods WithLabelValues, With, and MustCurryWith, which +// panic instead of returning errors. 
See also the MetricVec example. +type MetricVec struct { *metricMap curry []curriedLabelValue @@ -35,9 +43,9 @@ type metricVec struct { hashAddByte func(h uint64, b byte) uint64 } -// newMetricVec returns an initialized metricVec. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { - return &metricVec{ +// NewMetricVec returns an initialized metricVec. +func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ metricMap: &metricMap{ metrics: map[uint64][]metricWithLabelValues{}, desc: desc, @@ -63,7 +71,7 @@ func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. -func (m *metricVec) DeleteLabelValues(lvs ...string) bool { +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -82,7 +90,7 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool { // // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. -func (m *metricVec) Delete(labels Labels) bool { +func (m *MetricVec) Delete(labels Labels) bool { h, err := m.hashLabels(labels) if err != nil { return false @@ -95,15 +103,32 @@ func (m *metricVec) Delete(labels Labels) bool { // show up in GoDoc. // Describe implements Collector. -func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } +func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } // Collect implements Collector. -func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } +func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } // Reset deletes all metrics in this vector. -func (m *metricVec) Reset() { m.metricMap.Reset() } - -func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { +func (m *MetricVec) Reset() { m.metricMap.Reset() } + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the MetricVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +// +// Note that CurryWith is usually not called directly but through a wrapper +// around MetricVec, implementing a vector for a specific Metric +// implementation, for example GaugeVec. 
+func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { var ( newCurry []curriedLabelValue oldCurry = m.curry @@ -128,7 +153,7 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { return nil, fmt.Errorf("%d unknown label(s) found during currying", l) } - return &metricVec{ + return &MetricVec{ metricMap: m.metricMap, curry: newCurry, hashAdd: m.hashAdd, @@ -136,7 +161,34 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { }, nil } -func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the variable labels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created (by +// calling the newMetric function provided during construction of the +// MetricVec). +// +// It is possible to call this method without using the returned Metry to only +// create the new Metric but leave it in its intitial state. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of variable labels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// +// Note that GetMetricWithLabelValues is usually not called directly but through +// a wrapper around MetricVec, implementing a vector for a specific Metric +// implementation, for example GaugeVec. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -145,7 +197,23 @@ func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } -func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the variable labels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the variable labels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +// +// Note that GetMetricWith is usually not called directly but through a wrapper +// around MetricVec, implementing a vector for a specific Metric implementation, +// for example GaugeVec. 
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -154,7 +222,7 @@ func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil } -func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { return 0, err } @@ -177,7 +245,7 @@ func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { return h, nil } -func (m *metricVec) hashLabels(labels Labels) (uint64, error) { +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { return 0, err } @@ -276,7 +344,9 @@ func (m *metricMap) deleteByHashWithLabelValues( } if len(metrics) > 1 { + old := metrics m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + old[len(old)-1] = metricWithLabelValues{} } else { delete(m.metrics, h) } @@ -302,7 +372,9 @@ func (m *metricMap) deleteByHashWithLabels( } if len(metrics) > 1 { + old := metrics m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + old[len(old)-1] = metricWithLabelValues{} } else { delete(m.metrics, h) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 438aa5e9247d9..c1b12f0847e98 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -32,7 +32,9 @@ import ( // in a no-op Registerer. // // WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics exposed. +// Collectors. It should not be used to add fixed labels to all metrics +// exposed. See also +// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels // // Conflicts between Collectors registered through the original Registerer with // Collectors registered through the wrapping Registerer will still be diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 342e5940d0f7d..b6079b31eeb5a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) return nil } + // Check for duplicate label names. + labels := make(map[string]struct{}) + for _, l := range p.currentMetric.Label { + lName := l.GetName() + if _, exists := labels[lName]; !exists { + labels[lName] = struct{}{} + } else { + p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + return nil + } + } return p.startLabelValue } diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000..9a1aff412704f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
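
The client_golang hunks above export the former metricVec as MetricVec (together with NewMetricVec, GetMetricWithLabelValues, GetMetricWith, CurryWith and MakeLabelPairs) so that custom Metric implementations can build their own vector types. A minimal sketch of that pattern, assuming a hypothetical InfoVec whose children are constant gauges with value 1 (the type and its semantics are illustration only, not part of the patch):

```go
// Sketch of the pattern described by the new MetricVec doc comments: a custom
// vector type embedding the now-exported prometheus.MetricVec.
package custommetric

import (
	"github.com/prometheus/client_golang/prometheus"
)

// InfoVec bundles constant "info"-style metrics that differ only in labels.
type InfoVec struct {
	*prometheus.MetricVec
}

func NewInfoVec(name, help string, labelNames []string) *InfoVec {
	desc := prometheus.NewDesc(name, help, labelNames, nil)
	return &InfoVec{
		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
			// Each child is a constant gauge with value 1. NewConstMetric
			// builds its label pairs via the now-exported MakeLabelPairs.
			m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, 1, lvs...)
			if err != nil {
				panic(err)
			}
			return m
		}),
	}
}

// WithLabelValues mirrors the convenience wrappers of GaugeVec and friends,
// panicking instead of returning an error.
func (v *InfoVec) WithLabelValues(lvs ...string) prometheus.Metric {
	m, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return m
}
```

Because the embedded MetricVec already provides Describe and Collect, such a vector can be registered like any built-in one, e.g. prometheus.MustRegister(NewInfoVec("build_info", "Build information.", []string{"version"})).
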
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 31d42f7124c36..b9fb589aa1e04 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -407,6 +407,50 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + // firstNonEmptyLine advances the scanner to the first non-empty line // and returns the contents of that line func firstNonEmptyLine(scanner *bufio.Scanner) string { diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go similarity index 97% rename from vendor/github.com/prometheus/procfs/cpuinfo_arm64.go rename to vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 4f5d172a35659..44b590ed38fa6 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -12,7 +12,7 @@ // limitations under the License. // +build linux -// +build arm64 +// +build arm arm64 package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go deleted file mode 100644 index 22d93f8ef0c24..0000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go deleted file mode 100644 index 22d93f8ef0c24..0000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go deleted file mode 100644 index 22d93f8ef0c24..0000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go similarity index 94% rename from vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go rename to vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 22d93f8ef0c24..91e272573a51a 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -12,6 +12,7 @@ // limitations under the License. // +build linux +// +build mips mipsle mips64 mips64le package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go similarity index 82% rename from vendor/github.com/prometheus/procfs/cpuinfo_arm.go rename to vendor/github.com/prometheus/procfs/cpuinfo_others.go index 83555077069a0..95b5b4ec44a53 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -12,7 +12,8 @@ // limitations under the License. 
// +build linux +// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs -var parseCPUInfo = parseCPUInfoARM +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go deleted file mode 100644 index 64aee9c63c078..0000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go similarity index 96% rename from vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go rename to vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 64aee9c63c078..6068bd571c24a 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -12,6 +12,7 @@ // limitations under the License. // +build linux +// +build ppc64 ppc64le package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_default.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go similarity index 100% rename from vendor/github.com/prometheus/procfs/cpuinfo_default.go rename to vendor/github.com/prometheus/procfs/cpuinfo_x86.go diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 868c8573d9254..12494d7424486 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -467,7 +467,7 @@ Pid: 26231 PPid: 1 TracerPid: 0 Uid: 1000 1000 1000 0 -Gid: 0 0 0 0 +Gid: 1001 1001 1001 0 FDSize: 128 Groups: NStgid: 1 @@ -1966,7 +1966,7 @@ Lines: 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/mdstat -Lines: 56 +Lines: 60 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) @@ -1989,6 +1989,10 @@ md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) 195310144 blocks [2/2] [UU] [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec + md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk @@ -3754,6 +3758,73 @@ Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/name +Lines: 1 +package-10 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/thermal Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index beefdf02e9342..da3a941d60b90 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -25,7 +25,7 @@ import ( type KernelRandom struct { // EntropyAvaliable gives the available entropy, in bits. EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bytes. + // PoolSize gives the size of the entropy pool, in bits. 
PoolSize *uint64 // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. URandomMinReseedSeconds *uint64 diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 3e9362a94d930..98e37aa8cafd0 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -107,11 +107,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { syncedBlocks := size recovering := strings.Contains(lines[syncLineIdx], "recovery") resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") // Append recovery and resyncing state info. - if recovering || resyncing { + if recovering || resyncing || checking { if recovering { state = "recovering" + } else if checking { + state = "checking" } else { state = "resyncing" } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c58346d910f70..6edd8333b334e 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -72,8 +72,10 @@ type ProcStatus struct { // Number of involuntary context switches. NonVoluntaryCtxtSwitches uint64 - // UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) UIDs [4]string + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]string } // NewStatus returns the current status information of the process. @@ -119,6 +121,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.Name = vString case "Uid": copy(s.UIDs[:], strings.Split(vString, "\t")) + case "Gid": + copy(s.GIDs[:], strings.Split(vString, "\t")) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 223c8abfe270a..75904f7638ad4 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -98,8 +98,9 @@ var ( // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ - RemoteTimeout: model.Duration(30 * time.Second), - QueueConfig: DefaultQueueConfig, + RemoteTimeout: model.Duration(30 * time.Second), + QueueConfig: DefaultQueueConfig, + MetadataConfig: DefaultMetadataConfig, } // DefaultQueueConfig is the default remote queue configuration. @@ -121,6 +122,12 @@ var ( MaxBackoff: model.Duration(100 * time.Millisecond), } + // DefaultMetadataConfig is the default metadata configuration for a remote write endpoint. + DefaultMetadataConfig = MetadataConfig{ + Send: true, + SendInterval: model.Duration(1 * time.Minute), + } + // DefaultRemoteReadConfig is the default remote read configuration. DefaultRemoteReadConfig = RemoteReadConfig{ RemoteTimeout: model.Duration(1 * time.Minute), @@ -570,6 +577,7 @@ type RemoteWriteConfig struct { // values arbitrarily into the overflow maps of further-down types. HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` QueueConfig QueueConfig `yaml:"queue_config,omitempty"` + MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` } // SetDirectory joins any relative file paths with dir. 
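
The config.go hunk above adds a metadata_config block to remote_write, with DefaultMetadataConfig sending metric metadata every minute; the MetadataConfig struct itself appears in the next hunk. A hedged sketch of setting it programmatically, with a placeholder remote-write URL and an assumed 30s interval:

```go
// Sketch, not from the patch: building a remote_write entry that overrides
// the new metadata_config block. The URL is a placeholder.
package main

import (
	"fmt"
	"net/url"
	"time"

	common_config "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
)

func main() {
	u, _ := url.Parse("http://remote-storage.example:9201/write")

	// DefaultRemoteWriteConfig now carries DefaultMetadataConfig
	// (Send: true, SendInterval: 1m), per the hunk above.
	rw := config.DefaultRemoteWriteConfig
	rw.URL = &common_config.URL{URL: u}
	rw.MetadataConfig = config.MetadataConfig{
		Send:         true,
		SendInterval: model.Duration(30 * time.Second),
	}

	fmt.Printf("send metadata: %v every %s\n", rw.MetadataConfig.Send, rw.MetadataConfig.SendInterval)
}
```
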
@@ -623,6 +631,15 @@ type QueueConfig struct { MaxBackoff model.Duration `yaml:"max_backoff,omitempty"` } +// MetadataConfig is the configuration for sending metadata to remote +// storage. +type MetadataConfig struct { + // Send controls whether we send metric metadata to remote storage. + Send bool `yaml:"send"` + // SendInterval controls how frequently we send metric metadata. + SendInterval model.Duration `yaml:"send_interval"` +} + // RemoteReadConfig is the configuration for reading from remote storage. type RemoteReadConfig struct { URL *config.URL `yaml:"url"` diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index d77b193b35857..5b0402bdb0266 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -18,8 +18,8 @@ import ( "reflect" "github.com/go-kit/kit/log" - "github.com/prometheus/common/config" + "github.com/prometheus/prometheus/discovery/targetgroup" ) diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go index 9e154454829a7..2e0b477cd7785 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go @@ -20,6 +20,7 @@ import ( "net/url" "time" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/go-kit/kit/log" "github.com/prometheus/common/config" @@ -41,6 +42,7 @@ var userAgent = fmt.Sprintf("Prometheus/%s", version.Version) var DefaultSDConfig = SDConfig{ RefreshInterval: model.Duration(60 * time.Second), Port: 80, + Filters: []Filter{}, } func init() { @@ -51,13 +53,21 @@ func init() { type SDConfig struct { HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` - Host string `yaml:"host"` - Role string `yaml:"role"` - Port int `yaml:"port"` + Host string `yaml:"host"` + Role string `yaml:"role"` + Port int `yaml:"port"` + Filters []Filter `yaml:"filters"` RefreshInterval model.Duration `yaml:"refresh_interval"` } +// Filter represent a filter that can be passed to Docker Swarm to reduce the +// amount of data received. +type Filter struct { + Name string `yaml:"name"` + Values []string `yaml:"values"` +} + // Name returns the name of the Config. func (*SDConfig) Name() string { return "dockerswarm" } @@ -99,9 +109,10 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // the Discoverer interface. type Discovery struct { *refresh.Discovery - client *client.Client - role string - port int + client *client.Client + role string + port int + filters filters.Args } // NewDiscovery returns a new Discovery which periodically refreshes its targets. @@ -123,6 +134,13 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { client.WithAPIVersionNegotiation(), } + d.filters = filters.NewArgs() + for _, f := range conf.Filters { + for _, v := range f.Values { + d.filters.Add(f.Name, v) + } + } + // There are other protocols than HTTP supported by the Docker daemon, like // unix, which are not supported by the HTTP client. Passing HTTP client // options to the Docker client makes those non-HTTP requests fail. 
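To make the new filters setting for Docker Swarm service discovery concrete, here is a minimal standalone sketch of the same conversion NewDiscovery performs above: YAML-level Filter entries are flattened into a filters.Args that is later handed to NodeList/ServiceList/TaskList. The "label" filter name and its value are hypothetical examples, not taken from the patch.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

// Filter mirrors the new dockerswarm SD config entry: one name, many values.
type Filter struct {
	Name   string   `yaml:"name"`
	Values []string `yaml:"values"`
}

func main() {
	// Hypothetical SD config: only discover objects carrying this label.
	conf := []Filter{{Name: "label", Values: []string{"prometheus.scrape=true"}}}

	// Same loop as in NewDiscovery: every (name, value) pair becomes one
	// entry in the filters.Args sent to the Docker daemon.
	args := filters.NewArgs()
	for _, f := range conf {
		for _, v := range f.Values {
			args.Add(f.Name, v)
		}
	}
	fmt.Println(args.Len(), "filter value(s) registered")
}
```

The filtering therefore happens server-side in the Docker daemon, reducing the amount of data the discoverer has to pull on each refresh.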
diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go index 92e5ad888249f..7d70169a67dd8 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/network.go @@ -18,6 +18,7 @@ import ( "fmt" "github.com/docker/docker/api/types" + "github.com/prometheus/prometheus/util/strutil" ) diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go index 68d89a8fa1fb9..79727a94995fa 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/nodes.go @@ -48,7 +48,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err Source: "DockerSwarm", } - nodes, err := d.client.NodeList(ctx, types.NodeListOptions{}) + nodes, err := d.client.NodeList(ctx, types.NodeListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm nodes: %w", err) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go index 2769bc59cb07f..ae46bfd4dc108 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/services.go @@ -46,7 +46,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, Source: "DockerSwarm", } - services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) + services, err := d.client.ServiceList(ctx, types.ServiceListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go index d42682a35ffaa..04ced3d11f592 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/tasks.go @@ -43,7 +43,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err Source: "DockerSwarm", } - tasks, err := d.client.TaskList(ctx, types.TaskListOptions{}) + tasks, err := d.client.TaskList(ctx, types.TaskListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go index 8ae4a05b70dd9..a53d6274ef70d 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go +++ b/vendor/github.com/prometheus/prometheus/discovery/openstack/instance.go @@ -27,6 +27,7 @@ import ( "github.com/gophercloud/gophercloud/pagination" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go index 5f9a40546ae94..2ebb36cb29078 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/registry.go +++ 
b/vendor/github.com/prometheus/prometheus/discovery/registry.go @@ -21,8 +21,9 @@ import ( "strings" "sync" - "github.com/prometheus/prometheus/discovery/targetgroup" "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery/targetgroup" ) const ( diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 29545bbee0ee3..5856613503efa 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -32,13 +32,13 @@ import ( "github.com/go-kit/kit/log/level" "github.com/go-openapi/strfmt" "github.com/pkg/errors" - "go.uber.org/atomic" - "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" + "go.uber.org/atomic" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/labels" diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go index b6b859fa7ff1a..5c11cc2eeef01 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go @@ -19,7 +19,7 @@ import ( "sort" "strconv" - "github.com/cespare/xxhash" + "github.com/cespare/xxhash/v2" ) // Well-known label names used by Prometheus components. @@ -29,10 +29,11 @@ const ( BucketLabel = "le" InstanceName = "instance" - sep = '\xff' labelSep = '\xfe' ) +var seps = []byte{'\xff'} + // Label is a key/value pair of strings. type Label struct { Name, Value string @@ -70,10 +71,10 @@ func (ls Labels) Bytes(buf []byte) []byte { b.WriteByte(labelSep) for i, l := range ls { if i > 0 { - b.WriteByte(sep) + b.WriteByte(seps[0]) } b.WriteString(l.Name) - b.WriteByte(sep) + b.WriteByte(seps[0]) b.WriteString(l.Value) } return b.Bytes() @@ -134,13 +135,26 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { // Hash returns a hash value for the label set. func (ls Labels) Hash() uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } - for _, v := range ls { b = append(b, v.Name...) - b = append(b, sep) + b = append(b, seps[0]) b = append(b, v.Value...) - b = append(b, sep) + b = append(b, seps[0]) } return xxhash.Sum64(b) } @@ -157,9 +171,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ } else { b = append(b, ls[i].Name...) - b = append(b, sep) + b = append(b, seps[0]) b = append(b, ls[i].Value...) - b = append(b, sep) + b = append(b, seps[0]) i++ j++ } @@ -181,9 +195,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, sep) + b = append(b, seps[0]) b = append(b, ls[i].Value...) 
- b = append(b, sep) + b = append(b, seps[0]) } return xxhash.Sum64(b), b } diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go index cfcd05e210f04..557e566622bab 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go @@ -85,12 +85,12 @@ const ( type MetricType string const ( - MetricTypeCounter = "counter" - MetricTypeGauge = "gauge" - MetricTypeHistogram = "histogram" - MetricTypeGaugeHistogram = "gaugehistogram" - MetricTypeSummary = "summary" - MetricTypeInfo = "info" - MetricTypeStateset = "stateset" - MetricTypeUnknown = "unknown" + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") ) diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go index 3f34afe4c2bb7..2a5bd8c8ea0e8 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go @@ -62,10 +62,11 @@ func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { } type WriteRequest struct { - Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` + Metadata []MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } @@ -108,6 +109,13 @@ func (m *WriteRequest) GetTimeseries() []TimeSeries { return nil } +func (m *WriteRequest) GetMetadata() []MetricMetadata { + if m != nil { + return m.Metadata + } + return nil +} + // ReadRequest represents a remote read request. 
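The WriteRequest changes above add metric metadata as field 3 alongside the time series (field 2 stays reserved for Cortex's source marker). Below is a hedged sketch of what a sender could now marshal, using only types visible in this diff (prompb.WriteRequest, prompb.MetricMetadata); the series name, sample value, and help text are made up for illustration, and the snappy step simply mirrors how remote write compresses the payload on the wire.

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// A write request can now carry metadata next to the samples it describes.
	req := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels:  []prompb.Label{{Name: "__name__", Value: "http_requests_total"}},
			Samples: []prompb.Sample{{Value: 1, Timestamp: 1609746236000}},
		}},
		Metadata: []prompb.MetricMetadata{{
			Type:             prompb.MetricMetadata_COUNTER,
			MetricFamilyName: "http_requests_total",
			Help:             "Total number of HTTP requests.",
		}},
	}

	raw, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	// Remote write snappy-compresses the serialized request before sending it.
	fmt.Println("compressed payload:", len(snappy.Encode(nil, raw)), "bytes")
}
```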
type ReadRequest struct { Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` @@ -410,37 +418,38 @@ func init() { func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } var fileDescriptor_eefc82927d57d89b = []byte{ - // 466 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xbb, 0x4d, 0xdb, 0xa0, 0x71, 0x88, 0xc2, 0xb6, 0x25, 0xa6, 0x87, 0x34, 0xb2, 0x38, - 0x58, 0x2a, 0x0a, 0x22, 0x54, 0x9c, 0x38, 0x90, 0x96, 0x48, 0x45, 0x24, 0xfc, 0x59, 0x07, 0x81, - 0x10, 0x92, 0xe5, 0xd8, 0xa3, 0xc6, 0xa2, 0xfe, 0xd3, 0xdd, 0xb5, 0xd4, 0xbc, 0x1e, 0xa7, 0x9e, - 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0xed, 0xda, 0x0e, 0x1b, 0xb8, 0x70, 0x5b, 0x7f, 0xdf, 0x37, - 0x3f, 0xef, 0x8c, 0xc7, 0xd0, 0xe2, 0x98, 0x64, 0x12, 0x07, 0x39, 0xcf, 0x64, 0x46, 0x21, 0xe7, - 0x59, 0x82, 0x72, 0x81, 0x85, 0x38, 0xb2, 0xe4, 0x32, 0x47, 0x51, 0x1a, 0x47, 0x07, 0x97, 0xd9, - 0x65, 0xa6, 0x8f, 0x8f, 0xd5, 0xa9, 0x54, 0x9d, 0x09, 0xb4, 0x3e, 0xf2, 0x58, 0x22, 0xc3, 0xeb, - 0x02, 0x85, 0xa4, 0xcf, 0x01, 0x64, 0x9c, 0xa0, 0x40, 0x1e, 0xa3, 0xb0, 0x49, 0xbf, 0xe1, 0x5a, - 0xc3, 0xfb, 0x83, 0x3f, 0xcc, 0xc1, 0x2c, 0x4e, 0xd0, 0xd3, 0xee, 0xd9, 0xce, 0xed, 0xcf, 0xe3, - 0x2d, 0x66, 0xe4, 0x9d, 0xef, 0x04, 0x2c, 0x86, 0x41, 0x54, 0xd3, 0x4e, 0xa0, 0x79, 0x5d, 0x98, - 0xa8, 0x7b, 0x26, 0xea, 0x7d, 0x81, 0x7c, 0xc9, 0xea, 0x04, 0xfd, 0x02, 0xdd, 0x20, 0x0c, 0x31, - 0x97, 0x18, 0xf9, 0x1c, 0x45, 0x9e, 0xa5, 0x02, 0x7d, 0xdd, 0x81, 0xbd, 0xdd, 0x6f, 0xb8, 0xed, - 0xe1, 0x43, 0xb3, 0xd8, 0x78, 0xcd, 0x80, 0x55, 0xe9, 0xd9, 0x32, 0x47, 0x76, 0x58, 0x43, 0x4c, - 0x55, 0x38, 0xa7, 0xd0, 0x32, 0x05, 0x6a, 0x41, 0xd3, 0x1b, 0x4d, 0xdf, 0x4d, 0xc6, 0x5e, 0x67, - 0x8b, 0x76, 0x61, 0xdf, 0x9b, 0xb1, 0xf1, 0x68, 0x3a, 0x7e, 0xe9, 0x7f, 0x7a, 0xcb, 0xfc, 0xf3, - 0x8b, 0x0f, 0x6f, 0x5e, 0x7b, 0x1d, 0xe2, 0x8c, 0x54, 0x55, 0xb0, 0x46, 0xd1, 0x27, 0xd0, 0xe4, - 0x28, 0x8a, 0x2b, 0x59, 0x37, 0xd4, 0xfd, 0xb7, 0x21, 0xed, 0xb3, 0x3a, 0xe7, 0x7c, 0x23, 0xb0, - 0xab, 0x0d, 0xfa, 0x08, 0xa8, 0x90, 0x01, 0x97, 0xbe, 0x9e, 0x98, 0x0c, 0x92, 0xdc, 0x4f, 0x14, - 0x87, 0xb8, 0x0d, 0xd6, 0xd1, 0xce, 0xac, 0x36, 0xa6, 0x82, 0xba, 0xd0, 0xc1, 0x34, 0xda, 0xcc, - 0x6e, 0xeb, 0x6c, 0x1b, 0xd3, 0xc8, 0x4c, 0x9e, 0xc2, 0x9d, 0x24, 0x90, 0xe1, 0x02, 0xb9, 0xb0, - 0x1b, 0xfa, 0x56, 0xb6, 0x79, 0xab, 0x49, 0x30, 0xc7, 0xab, 0x69, 0x19, 0x60, 0xeb, 0x24, 0x3d, - 0x81, 0xdd, 0x45, 0x9c, 0x4a, 0x61, 0xef, 0xf4, 0x89, 0x6b, 0x0d, 0x0f, 0xff, 0x1e, 0xee, 0x85, - 0x32, 0x59, 0x99, 0x71, 0xc6, 0x60, 0x19, 0xcd, 0xd1, 0x67, 0xff, 0xbf, 0x25, 0x1b, 0xfb, 0x71, - 0x03, 0xfb, 0xe7, 0x8b, 0x22, 0xfd, 0xaa, 0x3e, 0x8e, 0x31, 0xd5, 0x17, 0xd0, 0x0e, 0x4b, 0xd9, - 0xdf, 0x40, 0x3e, 0x30, 0x91, 0x55, 0x61, 0x45, 0xbd, 0x1b, 0x9a, 0x8f, 0xf4, 0x18, 0x2c, 0xb5, - 0x46, 0x4b, 0x3f, 0x4e, 0x23, 0xbc, 0xa9, 0xe6, 0x04, 0x5a, 0x7a, 0xa5, 0x94, 0xb3, 0x83, 0xdb, - 0x55, 0x8f, 0xfc, 0x58, 0xf5, 0xc8, 0xaf, 0x55, 0x8f, 0x7c, 0xde, 0x53, 0xdc, 0x7c, 0x3e, 0xdf, - 0xd3, 0x3f, 0xc1, 0xd3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xb6, 0x6b, 0xcd, 0x43, 0x03, - 0x00, 0x00, + // 496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xee, 0x26, 0x69, 0x13, 0x8d, 0x43, 0x14, 0xb6, 0x2d, 0x09, 0x39, 0xa4, 0x91, 0xc5, 0x21, + 0x52, 0x51, 0x10, 0xa1, 0xe2, 0xd4, 0x03, 0x69, 0x89, 0x54, 0xa0, 0xe6, 0x67, 0x13, 0x04, 0x42, + 0x48, 0xd6, 0xc6, 0x1e, 
0x35, 0x16, 0xf5, 0x4f, 0x77, 0xd7, 0x52, 0xf3, 0x16, 0x3c, 0x13, 0xa7, + 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x2d, 0x5c, 0xb8, 0xad, 0xbf, 0x3f, + 0xcf, 0xcc, 0xce, 0x42, 0x53, 0x60, 0x18, 0x2b, 0x1c, 0x25, 0x22, 0x56, 0x31, 0x85, 0x44, 0xc4, + 0x21, 0xaa, 0x25, 0xa6, 0xb2, 0x67, 0xa9, 0x55, 0x82, 0x32, 0x27, 0x7a, 0x7b, 0x17, 0xf1, 0x45, + 0xac, 0x8f, 0x8f, 0xb2, 0x53, 0x8e, 0xda, 0x5f, 0x09, 0x34, 0x3f, 0x88, 0x40, 0x21, 0xc3, 0xab, + 0x14, 0xa5, 0xa2, 0xc7, 0x00, 0x2a, 0x08, 0x51, 0xa2, 0x08, 0x50, 0x76, 0xc9, 0xa0, 0x3a, 0xb4, + 0xc6, 0xf7, 0x46, 0x7f, 0x42, 0x47, 0xf3, 0x20, 0xc4, 0x99, 0x66, 0x4f, 0x6a, 0x37, 0x3f, 0x0f, + 0xb6, 0x98, 0xa1, 0xa7, 0xc7, 0xd0, 0x08, 0x51, 0x71, 0x9f, 0x2b, 0xde, 0xad, 0x6a, 0x6f, 0xcf, + 0xf4, 0x3a, 0xa8, 0x44, 0xe0, 0x39, 0x85, 0xa2, 0xf0, 0x6f, 0x1c, 0x2f, 0x6b, 0x8d, 0x4a, 0xbb, + 0x6a, 0x7f, 0x27, 0x60, 0x31, 0xe4, 0x7e, 0x59, 0xd1, 0x21, 0xd4, 0xaf, 0x52, 0xb3, 0x9c, 0xbb, + 0x66, 0xe4, 0xbb, 0x14, 0xc5, 0x8a, 0x95, 0x0a, 0xfa, 0x19, 0x3a, 0xdc, 0xf3, 0x30, 0x51, 0xe8, + 0xbb, 0x02, 0x65, 0x12, 0x47, 0x12, 0x5d, 0x3d, 0x86, 0x6e, 0x65, 0x50, 0x1d, 0xb6, 0xc6, 0x0f, + 0x4c, 0xb3, 0xf1, 0x9b, 0x11, 0x2b, 0xd4, 0xf3, 0x55, 0x82, 0x6c, 0xbf, 0x0c, 0x31, 0x51, 0x69, + 0x1f, 0x41, 0xd3, 0x04, 0xa8, 0x05, 0xf5, 0xd9, 0xc4, 0x79, 0x7b, 0x3e, 0x9d, 0xb5, 0xb7, 0x68, + 0x07, 0x76, 0x67, 0x73, 0x36, 0x9d, 0x38, 0xd3, 0xe7, 0xee, 0xc7, 0x37, 0xcc, 0x3d, 0x3d, 0x7b, + 0xff, 0xfa, 0xd5, 0xac, 0x4d, 0xec, 0x49, 0xe6, 0xe2, 0x9b, 0x28, 0xfa, 0x18, 0xea, 0x02, 0x65, + 0x7a, 0xa9, 0xca, 0x86, 0x3a, 0xff, 0x36, 0xa4, 0x79, 0x56, 0xea, 0xec, 0x6f, 0x04, 0xb6, 0x35, + 0x41, 0x1f, 0x02, 0x95, 0x8a, 0x0b, 0xe5, 0xea, 0xa9, 0x2b, 0x1e, 0x26, 0x6e, 0x98, 0xe5, 0x90, + 0x61, 0x95, 0xb5, 0x35, 0x33, 0x2f, 0x09, 0x47, 0xd2, 0x21, 0xb4, 0x31, 0xf2, 0x6f, 0x6b, 0x2b, + 0x5a, 0xdb, 0xc2, 0xc8, 0x37, 0x95, 0x47, 0xd0, 0x08, 0xb9, 0xf2, 0x96, 0x28, 0x64, 0x71, 0x73, + 0x5d, 0xb3, 0xaa, 0x73, 0xbe, 0xc0, 0x4b, 0x27, 0x17, 0xb0, 0x8d, 0x92, 0x1e, 0xc2, 0xf6, 0x32, + 0x88, 0x94, 0xec, 0xd6, 0x06, 0x64, 0x68, 0x8d, 0xf7, 0xff, 0x1e, 0xee, 0x59, 0x46, 0xb2, 0x5c, + 0x63, 0x4f, 0xc1, 0x32, 0x9a, 0xa3, 0x4f, 0xff, 0x7f, 0xd3, 0xcc, 0x1d, 0xb3, 0xaf, 0x61, 0xf7, + 0x74, 0x99, 0x46, 0x5f, 0xb2, 0xcb, 0x31, 0xa6, 0xfa, 0x0c, 0x5a, 0x5e, 0x0e, 0xbb, 0xb7, 0x22, + 0xef, 0x9b, 0x91, 0x85, 0xb1, 0x48, 0xbd, 0xe3, 0x99, 0x9f, 0xf4, 0x00, 0xac, 0x6c, 0x8d, 0x56, + 0x6e, 0x10, 0xf9, 0x78, 0x5d, 0xcc, 0x09, 0x34, 0xf4, 0x22, 0x43, 0x4e, 0xf6, 0x6e, 0xd6, 0x7d, + 0xf2, 0x63, 0xdd, 0x27, 0xbf, 0xd6, 0x7d, 0xf2, 0x69, 0x27, 0xcb, 0x4d, 0x16, 0x8b, 0x1d, 0xfd, + 0x92, 0x9e, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x13, 0x18, 0x12, 0x0a, 0x88, 0x03, 0x00, 0x00, } func (m *WriteRequest) Marshal() (dAtA []byte, err error) { @@ -467,6 +476,20 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Metadata) > 0 { + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Timeseries) > 0 { for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { { @@ -757,6 +780,12 @@ func (m *WriteRequest) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } if m.XXX_unrecognized != 
nil { n += len(m.XXX_unrecognized) } @@ -942,6 +971,40 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata, MetricMetadata{}) + if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRemote(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/vendor/github.com/prometheus/prometheus/prompb/remote.proto index ecd8f0bb19884..70c6dd3fbbc0d 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.proto @@ -21,6 +21,10 @@ import "gogoproto/gogo.proto"; message WriteRequest { repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; + // Cortex uses this field to determine the source of the write request. + // We reserve it to avoid any compatibility issues. + reserved 2; + repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; } // ReadRequest represents a remote read request. diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index 9c6e26a2ce138..5e593b73d2ce6 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -25,6 +25,49 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type MetricMetadata_MetricType int32 + +const ( + MetricMetadata_UNKNOWN MetricMetadata_MetricType = 0 + MetricMetadata_COUNTER MetricMetadata_MetricType = 1 + MetricMetadata_GAUGE MetricMetadata_MetricType = 2 + MetricMetadata_HISTOGRAM MetricMetadata_MetricType = 3 + MetricMetadata_GAUGEHISTOGRAM MetricMetadata_MetricType = 4 + MetricMetadata_SUMMARY MetricMetadata_MetricType = 5 + MetricMetadata_INFO MetricMetadata_MetricType = 6 + MetricMetadata_STATESET MetricMetadata_MetricType = 7 +) + +var MetricMetadata_MetricType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "COUNTER", + 2: "GAUGE", + 3: "HISTOGRAM", + 4: "GAUGEHISTOGRAM", + 5: "SUMMARY", + 6: "INFO", + 7: "STATESET", +} + +var MetricMetadata_MetricType_value = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +func (x MetricMetadata_MetricType) String() string { + return proto.EnumName(MetricMetadata_MetricType_name, int32(x)) +} + +func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0, 0} +} + type LabelMatcher_Type int32 const ( @@ -53,7 +96,7 @@ func (x LabelMatcher_Type) String() string { } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4, 0} + return fileDescriptor_d938547f84707355, []int{5, 0} } // We require this to match chunkenc.Encoding. @@ -79,7 +122,80 @@ func (x Chunk_Encoding) String() string { } func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6, 0} + return fileDescriptor_d938547f84707355, []int{7, 0} +} + +type MetricMetadata struct { + // Represents the metric type, these match the set from Prometheus. + // Refer to pkg/textparse/interface.go for details. 
+ Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` + MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` + Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } +func (m *MetricMetadata) String() string { return proto.CompactTextString(m) } +func (*MetricMetadata) ProtoMessage() {} +func (*MetricMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} +func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricMetadata.Merge(m, src) +} +func (m *MetricMetadata) XXX_Size() int { + return m.Size() +} +func (m *MetricMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MetricMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo + +func (m *MetricMetadata) GetType() MetricMetadata_MetricType { + if m != nil { + return m.Type + } + return MetricMetadata_UNKNOWN +} + +func (m *MetricMetadata) GetMetricFamilyName() string { + if m != nil { + return m.MetricFamilyName + } + return "" +} + +func (m *MetricMetadata) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricMetadata) GetUnit() string { + if m != nil { + return m.Unit + } + return "" } type Sample struct { @@ -94,7 +210,7 @@ func (m *Sample) Reset() { *m = Sample{} } func (m *Sample) String() string { return proto.CompactTextString(m) } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{0} + return fileDescriptor_d938547f84707355, []int{1} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -150,7 +266,7 @@ func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{1} + return fileDescriptor_d938547f84707355, []int{2} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -205,7 +321,7 @@ func (m *Label) Reset() { *m = Label{} } func (m *Label) String() string { return proto.CompactTextString(m) } func (*Label) ProtoMessage() {} func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{2} + return fileDescriptor_d938547f84707355, []int{3} } func (m *Label) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -259,7 +375,7 @@ func (m *Labels) Reset() { *m = Labels{} } func (m *Labels) String() string { return proto.CompactTextString(m) } func (*Labels) ProtoMessage() {} func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{4} } func 
(m *Labels) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -309,7 +425,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} + return fileDescriptor_d938547f84707355, []int{5} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -376,7 +492,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{6} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +586,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} + return fileDescriptor_d938547f84707355, []int{7} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -542,7 +658,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7} + return fileDescriptor_d938547f84707355, []int{8} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,8 +702,10 @@ func (m *ChunkedSeries) GetChunks() []Chunk { } func init() { + proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) + proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus.Sample") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*Label)(nil), "prometheus.Label") @@ -601,41 +719,104 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 539 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xee, 0xda, 0x89, 0x9d, 0x4c, 0x4a, 0x95, 0xae, 0x8a, 0x30, 0x15, 0x04, 0xcb, 0x27, 0x9f, - 0x5c, 0x35, 0x9c, 0x90, 0x38, 0x15, 0x45, 0x42, 0xa2, 0x4e, 0xd5, 0x6d, 0x11, 0x88, 0x4b, 0xb5, - 0x89, 0x17, 0xc7, 0x22, 0x5e, 0xbb, 0xde, 0x0d, 0x6a, 0x1e, 0x84, 0xc7, 0xe0, 0xc0, 0x5b, 0xf4, - 0xc8, 0x13, 0x20, 0x94, 0x27, 0x41, 0x3b, 0x76, 0x7e, 0xa4, 0x72, 0x81, 0xdb, 0xfc, 0x7c, 0xf3, - 0x7d, 0x9f, 0x77, 0xc6, 0xd0, 0xd3, 0xcb, 0x52, 0xa8, 0xa8, 0xac, 0x0a, 0x5d, 0x50, 0x28, 0xab, - 0x22, 0x17, 0x7a, 0x26, 0x16, 0xea, 0xf8, 0x28, 0x2d, 0xd2, 0x02, 0xcb, 0x27, 0x26, 0xaa, 0x11, - 0xc1, 0x6b, 0x70, 0xae, 0x78, 0x5e, 0xce, 0x05, 0x3d, 0x82, 0xf6, 0x57, 0x3e, 0x5f, 0x08, 0x8f, - 0xf8, 0x24, 0x24, 0xac, 0x4e, 0xe8, 0x33, 0xe8, 0xea, 0x2c, 0x17, 0x4a, 0xf3, 0xbc, 0xf4, 0x2c, - 0x9f, 0x84, 0x36, 0xdb, 0x16, 0x82, 0x5b, 0x80, 0xeb, 0x2c, 0x17, 0x57, 0xa2, 0xca, 0x84, 0xa2, - 
0x27, 0xe0, 0xcc, 0xf9, 0x44, 0xcc, 0x95, 0x47, 0x7c, 0x3b, 0xec, 0x0d, 0x0f, 0xa3, 0xad, 0x7c, - 0x74, 0x6e, 0x3a, 0x67, 0xad, 0xfb, 0x5f, 0x2f, 0xf6, 0x58, 0x03, 0xa3, 0x43, 0x70, 0x15, 0x8a, - 0x2b, 0xcf, 0xc2, 0x09, 0xba, 0x3b, 0x51, 0xfb, 0x6a, 0x46, 0xd6, 0xc0, 0xe0, 0x14, 0xda, 0x48, - 0x45, 0x29, 0xb4, 0x24, 0xcf, 0x6b, 0xbb, 0x5d, 0x86, 0xf1, 0xf6, 0x1b, 0x2c, 0x2c, 0xd6, 0x49, - 0xf0, 0x0a, 0x9c, 0xf3, 0x5a, 0xf0, 0x5f, 0x1d, 0x06, 0xdf, 0x08, 0xec, 0x63, 0x3d, 0xe6, 0x7a, - 0x3a, 0x13, 0x15, 0x3d, 0x85, 0x96, 0x79, 0x60, 0x54, 0x3d, 0x18, 0x3e, 0x7f, 0x30, 0xdf, 0xe0, - 0xa2, 0xeb, 0x65, 0x29, 0x18, 0x42, 0x37, 0x46, 0xad, 0xbf, 0x19, 0xb5, 0x77, 0x8d, 0x86, 0xd0, - 0x32, 0x73, 0xd4, 0x01, 0x6b, 0x74, 0xd9, 0xdf, 0xa3, 0x2e, 0xd8, 0xe3, 0xd1, 0x65, 0x9f, 0x98, - 0x02, 0x1b, 0xf5, 0x2d, 0x2c, 0xb0, 0x51, 0xdf, 0x0e, 0x7e, 0x10, 0xe8, 0x32, 0xc1, 0x93, 0xb7, - 0x99, 0xd4, 0x8a, 0x3e, 0x01, 0x57, 0x69, 0x51, 0xde, 0xe4, 0x0a, 0x7d, 0xd9, 0xcc, 0x31, 0x69, - 0xac, 0x8c, 0xf4, 0xe7, 0x85, 0x9c, 0xae, 0xa5, 0x4d, 0x4c, 0x9f, 0x42, 0x47, 0x69, 0x5e, 0x69, - 0x83, 0xb6, 0x11, 0xed, 0x62, 0x1e, 0x2b, 0xfa, 0x18, 0x1c, 0x21, 0x13, 0xd3, 0x68, 0x61, 0xa3, - 0x2d, 0x64, 0x12, 0x2b, 0x7a, 0x0c, 0x9d, 0xb4, 0x2a, 0x16, 0x65, 0x26, 0x53, 0xaf, 0xed, 0xdb, - 0x61, 0x97, 0x6d, 0x72, 0x7a, 0x00, 0xd6, 0x64, 0xe9, 0x39, 0x3e, 0x09, 0x3b, 0xcc, 0x9a, 0x2c, - 0x0d, 0x7b, 0xc5, 0x65, 0x2a, 0x0c, 0x89, 0x5b, 0xb3, 0x63, 0x1e, 0xab, 0xe0, 0x3b, 0x81, 0xf6, - 0x9b, 0xd9, 0x42, 0x7e, 0xa1, 0x03, 0xe8, 0xe5, 0x99, 0xbc, 0x31, 0x77, 0xb4, 0xf5, 0xdc, 0xcd, - 0x33, 0x69, 0x8e, 0x29, 0x56, 0xd8, 0xe7, 0x77, 0x9b, 0x7e, 0x73, 0x76, 0x39, 0xbf, 0x6b, 0xfa, - 0x51, 0xb3, 0x04, 0x1b, 0x97, 0x70, 0xbc, 0xbb, 0x04, 0x14, 0x88, 0x46, 0x72, 0x5a, 0x24, 0x99, - 0x4c, 0xb7, 0x1b, 0x48, 0xb8, 0xe6, 0xf8, 0x55, 0xfb, 0x0c, 0xe3, 0xc0, 0x87, 0xce, 0x1a, 0x45, - 0x7b, 0xe0, 0xbe, 0x1f, 0xbf, 0x1b, 0x5f, 0x7c, 0x18, 0xd7, 0x8f, 0xfe, 0xf1, 0x82, 0xf5, 0x49, - 0x70, 0x0b, 0x8f, 0x90, 0x4d, 0x24, 0xff, 0x7b, 0xdf, 0x27, 0xe0, 0x4c, 0x0d, 0xc3, 0xfa, 0xbc, - 0x0f, 0x1f, 0x38, 0x5d, 0x0f, 0xd4, 0xb0, 0xb3, 0xa3, 0xfb, 0xd5, 0x80, 0xfc, 0x5c, 0x0d, 0xc8, - 0xef, 0xd5, 0x80, 0x7c, 0x72, 0x0c, 0xba, 0x9c, 0x4c, 0x1c, 0xfc, 0x55, 0x5f, 0xfe, 0x09, 0x00, - 0x00, 0xff, 0xff, 0xed, 0x99, 0x84, 0x88, 0xdb, 0x03, 0x00, 0x00, + // 690 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x6e, 0xda, 0x40, + 0x10, 0xce, 0xfa, 0x17, 0x86, 0x04, 0x39, 0xab, 0x54, 0x75, 0xa3, 0x96, 0x22, 0x4b, 0x95, 0x38, + 0x54, 0x44, 0x49, 0x4f, 0x91, 0x7a, 0x21, 0x91, 0xf3, 0xa3, 0xc6, 0xa0, 0x2c, 0xa0, 0xfe, 0x5c, + 0xd0, 0x02, 0x1b, 0xb0, 0x8a, 0x8d, 0xe3, 0x5d, 0xaa, 0xf0, 0x20, 0xbd, 0xf5, 0x15, 0x7a, 0xe8, + 0x5b, 0xe4, 0xd8, 0x27, 0xa8, 0xaa, 0x3c, 0x49, 0xb5, 0x6b, 0x13, 0x13, 0xa5, 0x97, 0xf6, 0x36, + 0xf3, 0x7d, 0xdf, 0xfc, 0xec, 0xcc, 0xd8, 0x50, 0x11, 0xcb, 0x84, 0xf1, 0x66, 0x92, 0xce, 0xc5, + 0x1c, 0x43, 0x92, 0xce, 0x23, 0x26, 0xa6, 0x6c, 0xc1, 0x77, 0x77, 0x26, 0xf3, 0xc9, 0x5c, 0xc1, + 0x7b, 0xd2, 0xca, 0x14, 0xde, 0x37, 0x0d, 0xaa, 0x01, 0x13, 0x69, 0x38, 0x0a, 0x98, 0xa0, 0x63, + 0x2a, 0x28, 0x3e, 0x04, 0x43, 0xe6, 0x70, 0x51, 0x1d, 0x35, 0xaa, 0x07, 0xaf, 0x9a, 0x45, 0x8e, + 0xe6, 0x43, 0x65, 0xee, 0xf6, 0x96, 0x09, 0x23, 0x2a, 0x04, 0xbf, 0x06, 0x1c, 0x29, 0x6c, 0x70, + 0x45, 0xa3, 0x70, 0xb6, 0x1c, 0xc4, 0x34, 0x62, 0xae, 0x56, 0x47, 0x8d, 0x32, 0x71, 0x32, 0xe6, + 0x44, 0x11, 0x6d, 0x1a, 0x31, 0x8c, 0xc1, 0x98, 0xb2, 0x59, 0xe2, 0x1a, 0x8a, 0x57, 0xb6, 0xc4, + 0x16, 
0x71, 0x28, 0x5c, 0x33, 0xc3, 0xa4, 0xed, 0x2d, 0x01, 0x8a, 0x4a, 0xb8, 0x02, 0x76, 0xbf, + 0xfd, 0xae, 0xdd, 0x79, 0xdf, 0x76, 0x36, 0xa4, 0x73, 0xdc, 0xe9, 0xb7, 0x7b, 0x3e, 0x71, 0x10, + 0x2e, 0x83, 0x79, 0xda, 0xea, 0x9f, 0xfa, 0x8e, 0x86, 0xb7, 0xa0, 0x7c, 0x76, 0xde, 0xed, 0x75, + 0x4e, 0x49, 0x2b, 0x70, 0x74, 0x8c, 0xa1, 0xaa, 0x98, 0x02, 0x33, 0x64, 0x68, 0xb7, 0x1f, 0x04, + 0x2d, 0xf2, 0xd1, 0x31, 0x71, 0x09, 0x8c, 0xf3, 0xf6, 0x49, 0xc7, 0xb1, 0xf0, 0x26, 0x94, 0xba, + 0xbd, 0x56, 0xcf, 0xef, 0xfa, 0x3d, 0xc7, 0xf6, 0xde, 0x82, 0xd5, 0xa5, 0x51, 0x32, 0x63, 0x78, + 0x07, 0xcc, 0x2f, 0x74, 0xb6, 0xc8, 0xc6, 0x82, 0x48, 0xe6, 0xe0, 0xe7, 0x50, 0x16, 0x61, 0xc4, + 0xb8, 0xa0, 0x51, 0xa2, 0xde, 0xa9, 0x93, 0x02, 0xf0, 0xae, 0x01, 0x7a, 0x61, 0xc4, 0xba, 0x2c, + 0x0d, 0x19, 0xc7, 0x7b, 0x60, 0xcd, 0xe8, 0x90, 0xcd, 0xb8, 0x8b, 0xea, 0x7a, 0xa3, 0x72, 0xb0, + 0xbd, 0x3e, 0xd9, 0x0b, 0xc9, 0x1c, 0x19, 0xb7, 0xbf, 0x5e, 0x6e, 0x90, 0x5c, 0x86, 0x0f, 0xc0, + 0xe6, 0xaa, 0x38, 0x77, 0x35, 0x15, 0x81, 0xd7, 0x23, 0xb2, 0xbe, 0xf2, 0x90, 0x95, 0xd0, 0xdb, + 0x07, 0x53, 0xa5, 0x92, 0x83, 0x54, 0xc3, 0x47, 0xd9, 0x20, 0xa5, 0x5d, 0xbc, 0x21, 0xdb, 0x48, + 0xe6, 0x78, 0x87, 0x60, 0x5d, 0x64, 0x05, 0xff, 0xb5, 0x43, 0xef, 0x2b, 0x82, 0x4d, 0x85, 0x07, + 0x54, 0x8c, 0xa6, 0x2c, 0xc5, 0xfb, 0x0f, 0x6e, 0xe7, 0xc5, 0xa3, 0xf8, 0x5c, 0xd7, 0x5c, 0xbb, + 0x99, 0x55, 0xa3, 0xda, 0xdf, 0x1a, 0xd5, 0xd7, 0x1b, 0x6d, 0x80, 0xa1, 0x2e, 0xc0, 0x02, 0xcd, + 0xbf, 0x74, 0x36, 0xb0, 0x0d, 0x7a, 0xdb, 0xbf, 0x74, 0x90, 0x04, 0x88, 0xdc, 0xba, 0x04, 0x88, + 0xef, 0xe8, 0xde, 0x0f, 0x04, 0x65, 0xc2, 0xe8, 0xf8, 0x2c, 0x8c, 0x05, 0xc7, 0x4f, 0xc1, 0xe6, + 0x82, 0x25, 0x83, 0x88, 0xab, 0xbe, 0x74, 0x62, 0x49, 0x37, 0xe0, 0xb2, 0xf4, 0xd5, 0x22, 0x1e, + 0xad, 0x4a, 0x4b, 0x1b, 0x3f, 0x83, 0x12, 0x17, 0x34, 0x15, 0x52, 0xad, 0x2b, 0xb5, 0xad, 0xfc, + 0x80, 0xe3, 0x27, 0x60, 0xb1, 0x78, 0x2c, 0x09, 0x43, 0x11, 0x26, 0x8b, 0xc7, 0x01, 0xc7, 0xbb, + 0x50, 0x9a, 0xa4, 0xf3, 0x45, 0x12, 0xc6, 0x13, 0xd7, 0xac, 0xeb, 0x8d, 0x32, 0xb9, 0xf7, 0x71, + 0x15, 0xb4, 0xe1, 0xd2, 0xb5, 0xea, 0xa8, 0x51, 0x22, 0xda, 0x70, 0x29, 0xb3, 0xa7, 0x34, 0x9e, + 0x30, 0x99, 0xc4, 0xce, 0xb2, 0x2b, 0x3f, 0xe0, 0xde, 0x77, 0x04, 0xe6, 0xf1, 0x74, 0x11, 0x7f, + 0xc6, 0x35, 0xa8, 0x44, 0x61, 0x3c, 0x90, 0x77, 0x54, 0xf4, 0x5c, 0x8e, 0xc2, 0x58, 0x1e, 0x53, + 0xc0, 0x15, 0x4f, 0x6f, 0xee, 0xf9, 0xfc, 0xec, 0x22, 0x7a, 0x93, 0xf3, 0xcd, 0x7c, 0x09, 0xba, + 0x5a, 0xc2, 0xee, 0xfa, 0x12, 0x54, 0x81, 0xa6, 0x1f, 0x8f, 0xe6, 0xe3, 0x30, 0x9e, 0x14, 0x1b, + 0x90, 0x9f, 0xb3, 0x7a, 0xd5, 0x26, 0x51, 0xb6, 0x57, 0x87, 0xd2, 0x4a, 0xf5, 0xf0, 0x8b, 0xb3, + 0x41, 0xff, 0xd0, 0x21, 0x0e, 0xf2, 0xae, 0x61, 0x4b, 0x65, 0x63, 0xe3, 0xff, 0xbd, 0xef, 0x3d, + 0xb0, 0x46, 0x32, 0xc3, 0xea, 0xbc, 0xb7, 0x1f, 0x75, 0xba, 0x0a, 0xc8, 0x64, 0x47, 0x3b, 0xb7, + 0x77, 0x35, 0xf4, 0xf3, 0xae, 0x86, 0x7e, 0xdf, 0xd5, 0xd0, 0x27, 0x4b, 0xaa, 0x93, 0xe1, 0xd0, + 0x52, 0x7f, 0xb2, 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0xb7, 0x12, 0x44, 0xfa, 0x04, + 0x00, 0x00, +} + +func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != 
nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x22 + } + if len(m.MetricFamilyName) > 0 { + i -= len(m.MetricFamilyName) + copy(dAtA[i:], m.MetricFamilyName) + i = encodeVarintTypes(dAtA, i, uint64(len(m.MetricFamilyName))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *Sample) Marshal() (dAtA []byte, err error) { @@ -1047,6 +1228,33 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MetricMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.MetricFamilyName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Sample) Size() (n int) { if m == nil { return 0 @@ -1242,6 +1450,175 @@ func sovTypes(x uint64) (n int) { func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (m *MetricMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricMetadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricFamilyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricFamilyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Sample) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index de437d182728c..259a0d40d3816 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -18,6 +18,26 @@ option go_package = "prompb"; import "gogoproto/gogo.proto"; +message MetricMetadata { + enum MetricType { + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; + GAUGEHISTOGRAM = 4; + SUMMARY = 5; + INFO = 6; + STATESET = 7; + } + + // Represents the metric type, these match the set from Prometheus. + // Refer to pkg/textparse/interface.go for details. + MetricType type = 1; + string metric_family_name = 2; + string help = 4; + string unit = 5; +} + message Sample { double value = 1; int64 timestamp = 2; diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index c21319b416c46..81bd70d5fbe79 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -1027,6 +1027,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // Matrix evaluation always returns the evaluation time, // so this function needs special handling when given // a vector selector. 
+ unwrapParenExpr(&e.Args[0]) vs, ok := e.Args[0].(*parser.VectorSelector) if ok { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index cf26fcc472490..4117f0e715077 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -3,11 +3,8 @@ //line generated_parser.y:15 package parser -import __yyfmt__ "fmt" - -//line generated_parser.y:15 - import ( + __yyfmt__ "fmt" "math" "sort" "strconv" @@ -15,7 +12,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/value" -) +) //line generated_parser.y:15 //line generated_parser.y:28 type yySymType struct { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 50d69b21d2701..99879445d8cc3 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -577,7 +577,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) { case *SubqueryExpr: ty := p.checkAST(n.Expr) if ty != ValueTypeVector { - p.addParseErrf(n.PositionRange(), "subquery is only allowed on instant vector, got %s in %q instead", ty, n.String()) + p.addParseErrf(n.PositionRange(), "subquery is only allowed on instant vector, got %s instead", ty) } case *MatrixSelector: p.checkAST(n.VectorSelector) diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go index e187000af7435..eef4aa8e418a9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go @@ -20,6 +20,7 @@ import ( "time" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" ) diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index bcd90200f876b..7992c5b23a880 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index a28f06bbae863..fa3a71d352d37 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -21,11 +21,10 @@ import ( "github.com/pkg/errors" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) func (Matrix) Type() parser.ValueType { return parser.ValueTypeMatrix } diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index c82bc9640acd1..a4074221f9fbb 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ 
b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -16,19 +16,17 @@ package rules import ( "context" "fmt" + html_template "html/template" "net/url" "strings" "sync" "time" - html_template "html/template" - - yaml "gopkg.in/yaml.v2" - "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/common/model" + yaml "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/rulefmt" diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 26bb0d1a5754f..bf73561a12512 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -26,8 +26,8 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/labels" diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index d52b0ac7f3149..bd89b69f6f22a 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -191,17 +191,20 @@ func init() { type scrapePool struct { appendable storage.Appendable logger log.Logger + cancel context.CancelFunc - mtx sync.Mutex - config *config.ScrapeConfig - client *http.Client - // Targets and loops must always be synchronized to have the same + // mtx must not be taken after targetMtx. + mtx sync.Mutex + config *config.ScrapeConfig + client *http.Client + loops map[uint64]loop + targetLimitHit bool // Internal state to speed up the target_limit checks. + + targetMtx sync.Mutex + // activeTargets and loops must always be synchronized to have the same // set of hashes. activeTargets map[uint64]*Target droppedTargets []*Target - loops map[uint64]loop - cancel context.CancelFunc - targetLimitHit bool // Internal state to speed up the target_limit checks. // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop @@ -273,8 +276,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed } func (sp *scrapePool) ActiveTargets() []*Target { - sp.mtx.Lock() - defer sp.mtx.Unlock() + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() var tActive []*Target for _, t := range sp.activeTargets { @@ -284,18 +287,19 @@ func (sp *scrapePool) ActiveTargets() []*Target { } func (sp *scrapePool) DroppedTargets() []*Target { - sp.mtx.Lock() - defer sp.mtx.Unlock() + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() return sp.droppedTargets } // stop terminates all scrape loops and returns after they all terminated. func (sp *scrapePool) stop() { + sp.mtx.Lock() + defer sp.mtx.Unlock() sp.cancel() var wg sync.WaitGroup - sp.mtx.Lock() - defer sp.mtx.Unlock() + sp.targetMtx.Lock() for fp, l := range sp.loops { wg.Add(1) @@ -308,6 +312,9 @@ func (sp *scrapePool) stop() { delete(sp.loops, fp) delete(sp.activeTargets, fp) } + + sp.targetMtx.Unlock() + wg.Wait() sp.client.CloseIdleConnections() @@ -323,11 +330,10 @@ func (sp *scrapePool) stop() { // but all scrape loops are restarted with the new scrape configuration. // This method returns after all scrape loops that were stopped have stopped scraping. 
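The scrapePool hunks above split the single mtx into mtx plus a narrower targetMtx, with the stated rule that mtx must never be taken after targetMtx. The sketch below is a simplified, hypothetical model of that discipline rather than the real scrapePool: hold targetMtx only while touching the loop/target maps, and release it before waiting for loops to finish so readers like ActiveTargets are not blocked behind slow shutdowns.

```go
package main

import "sync"

// pool models the new locking layout: mtx guards whole-pool operations,
// targetMtx guards the loop/target maps, and mtx is never taken after targetMtx.
type pool struct {
	mtx       sync.Mutex
	targetMtx sync.Mutex
	loops     map[int]func()
}

func (p *pool) stop() {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	var wg sync.WaitGroup
	p.targetMtx.Lock()
	for id, stopLoop := range p.loops {
		wg.Add(1)
		go func(stop func()) { defer wg.Done(); stop() }(stopLoop)
		delete(p.loops, id)
	}
	// Release targetMtx before waiting, so concurrent readers of the target
	// maps are not held up by long-running loop shutdowns.
	p.targetMtx.Unlock()
	wg.Wait()
}

func main() {
	p := &pool{loops: map[int]func(){1: func() {}}}
	p.stop()
}
```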
func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { - targetScrapePoolReloads.Inc() - start := time.Now() - sp.mtx.Lock() defer sp.mtx.Unlock() + targetScrapePoolReloads.Inc() + start := time.Now() client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, false, false) if err != nil { @@ -352,6 +358,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { mrc = sp.config.MetricRelabelConfigs ) + sp.targetMtx.Lock() + forcedErr := sp.refreshTargetLimitErr() for fp, oldLoop := range sp.loops { var cache *scrapeCache @@ -387,6 +395,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.loops[fp] = newLoop } + sp.targetMtx.Unlock() + wg.Wait() oldClient.CloseIdleConnections() targetReloadIntervalLength.WithLabelValues(interval.String()).Observe( @@ -400,9 +410,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.mtx.Lock() defer sp.mtx.Unlock() - start := time.Now() + sp.targetMtx.Lock() var all []*Target sp.droppedTargets = []*Target{} for _, tg := range tgs { @@ -419,6 +429,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { } } } + sp.targetMtx.Unlock() sp.sync(all) targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe( @@ -431,7 +442,6 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { // scrape loops for new targets, and stops scrape loops for disappeared targets. // It returns after all stopped scrape loops terminated. func (sp *scrapePool) sync(targets []*Target) { - // This function expects that you have acquired the sp.mtx lock. var ( uniqueLoops = make(map[uint64]loop) interval = time.Duration(sp.config.ScrapeInterval) @@ -442,6 +452,7 @@ func (sp *scrapePool) sync(targets []*Target) { mrc = sp.config.MetricRelabelConfigs ) + sp.targetMtx.Lock() for _, t := range targets { hash := t.hash() @@ -487,6 +498,8 @@ func (sp *scrapePool) sync(targets []*Target) { } } + sp.targetMtx.Unlock() + targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops))) forcedErr := sp.refreshTargetLimitErr() for _, l := range sp.loops { @@ -507,7 +520,6 @@ func (sp *scrapePool) sync(targets []*Target) { // refreshTargetLimitErr returns an error that can be passed to the scrape loops // if the number of targets exceeds the configured limit. func (sp *scrapePool) refreshTargetLimitErr() error { - // This function expects that you have acquired the sp.mtx lock. if sp.config == nil || sp.config.TargetLimit == 0 && !sp.targetLimitHit { return nil } diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index ac9cad0316864..2b4c4301ca4cf 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -75,6 +75,7 @@ func (t *Target) String() string { return t.URL().String() } +// MetricMetadataStore represents a storage for metadata. 
type MetricMetadataStore interface { ListMetadata() []MetricMetadata GetMetadata(metric string) (MetricMetadata, bool) diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 62b2ef54ae0a2..4bc3db12d9f68 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -19,6 +19,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" ) @@ -79,8 +80,7 @@ func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) querier, err := storage.Querier(ctx, mint, maxt) if err != nil { // Close already open Queriers, append potential errors to returned error. - errs := tsdb_errors.MultiError{err} - errs.Add(primary.Close()) + errs := tsdb_errors.NewMulti(err, primary.Close()) for _, q := range secondaries { errs.Add(q.Close()) } @@ -102,8 +102,7 @@ func (f *fanout) ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQueri querier, err := storage.ChunkQuerier(ctx, mint, maxt) if err != nil { // Close already open Queriers, append potential errors to returned error. - errs := tsdb_errors.MultiError{err} - errs.Add(primary.Close()) + errs := tsdb_errors.NewMulti(err, primary.Close()) for _, q := range secondaries { errs.Add(q.Close()) } @@ -129,8 +128,7 @@ func (f *fanout) Appender(ctx context.Context) Appender { // Close closes the storage and all its underlying resources. func (f *fanout) Close() error { - errs := tsdb_errors.MultiError{} - errs.Add(f.primary.Close()) + errs := tsdb_errors.NewMulti(f.primary.Close()) for _, s := range f.secondaries { errs.Add(s.Close()) } diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index e3026cf5eed3e..27e701883ece6 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -22,6 +22,7 @@ import ( "sync" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -247,7 +248,7 @@ func (q *mergeGenericQuerier) LabelNames() ([]string, Warnings, error) { // Close releases the resources of the generic querier. 
func (q *mergeGenericQuerier) Close() error { - errs := tsdb_errors.MultiError{} + errs := tsdb_errors.NewMulti() for _, querier := range q.queriers { if err := querier.Close(); err != nil { errs.Add(err) @@ -533,11 +534,9 @@ func (c *chainSampleIterator) Next() bool { } func (c *chainSampleIterator) Err() error { - var errs tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for _, iter := range c.iterators { - if err := iter.Err(); err != nil { - errs.Add(err) - } + errs.Add(iter.Err()) } return errs.Err() } @@ -680,11 +679,9 @@ func (c *compactChunkIterator) Next() bool { } func (c *compactChunkIterator) Err() error { - var errs tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for _, iter := range c.iterators { - if err := iter.Err(); err != nil { - errs.Add(err) - } + errs.Add(iter.Err()) } errs.Add(c.err) return errs.Err() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index b7965a2c98c24..4e2b9e5b8ff60 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -27,6 +27,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" + "github.com/opentracing-contrib/go-stdlib/nethttp" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -34,7 +35,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/version" - "github.com/opentracing-contrib/go-stdlib/nethttp" "github.com/prometheus/prometheus/prompb" ) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 47360dd57ba6b..f8033111cb3ed 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -19,12 +19,15 @@ import ( "io/ioutil" "net/http" "sort" + "strings" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/textparse" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -483,3 +486,14 @@ func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Labe } return result } + +// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. 
+func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType { + mt := strings.ToUpper(string(t)) + v, ok := prompb.MetricMetadata_MetricType_value[mt] + if !ok { + return prompb.MetricMetadata_UNKNOWN + } + + return prompb.MetricMetadata_MetricType(v) +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/intern.go b/vendor/github.com/prometheus/prometheus/storage/remote/intern.go index 98eec34141a69..23047acd9bb0f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/intern.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/intern.go @@ -21,10 +21,9 @@ package remote import ( "sync" - "go.uber.org/atomic" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/atomic" ) var noReferenceReleases = promauto.NewCounter(prometheus.CounterOpts{ diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/max_gauge.go b/vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go similarity index 80% rename from vendor/github.com/prometheus/prometheus/storage/remote/max_gauge.go rename to vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go index a56c2047d5607..3a0a6d6fd4b7b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/max_gauge.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go @@ -19,13 +19,13 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -type maxGauge struct { +type maxTimestamp struct { mtx sync.Mutex value float64 prometheus.Gauge } -func (m *maxGauge) Set(value float64) { +func (m *maxTimestamp) Set(value float64) { m.mtx.Lock() defer m.mtx.Unlock() if value > m.value { @@ -34,8 +34,14 @@ func (m *maxGauge) Set(value float64) { } } -func (m *maxGauge) Get() float64 { +func (m *maxTimestamp) Get() float64 { m.mtx.Lock() defer m.mtx.Unlock() return m.value } + +func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) { + if m.Get() > 0 { + m.Gauge.Collect(c) + } +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go new file mode 100644 index 0000000000000..a347a3f253f60 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -0,0 +1,163 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/scrape" +) + +// MetadataAppender is an interface used by the Metadata Watcher to send metadata, It is read from the scrape manager, on to somewhere else. +type MetadataAppender interface { + AppendMetadata(context.Context, []scrape.MetricMetadata) +} + +// Watchable represents from where we fetch active targets for metadata. 
+type Watchable interface { + TargetsActive() map[string][]*scrape.Target +} + +type noopScrapeManager struct{} + +func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { + return nil, errors.New("Scrape manager not ready") +} + +// MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. +type MetadataWatcher struct { + name string + logger log.Logger + + managerGetter ReadyScrapeManager + manager Watchable + writer MetadataAppender + + interval model.Duration + deadline time.Duration + + done chan struct{} + + softShutdownCtx context.Context + softShutdownCancel context.CancelFunc + hardShutdownCancel context.CancelFunc + hardShutdownCtx context.Context +} + +// NewMetadataWatcher builds a new MetadataWatcher. +func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { + if l == nil { + l = log.NewNopLogger() + } + + if mg == nil { + mg = &noopScrapeManager{} + } + + return &MetadataWatcher{ + name: name, + logger: l, + + managerGetter: mg, + writer: w, + + interval: interval, + deadline: deadline, + + done: make(chan struct{}), + } +} + +// Start the MetadataWatcher. +func (mw *MetadataWatcher) Start() { + level.Info(mw.logger).Log("msg", "Starting scraped metadata watcher") + mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background()) + mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx) + go mw.loop() +} + +// Stop the MetadataWatcher. +func (mw *MetadataWatcher) Stop() { + level.Info(mw.logger).Log("msg", "Stopping metadata watcher...") + defer level.Info(mw.logger).Log("msg", "Scraped metadata watcher stopped") + + mw.softShutdownCancel() + select { + case <-mw.done: + return + case <-time.After(mw.deadline): + level.Error(mw.logger).Log("msg", "Failed to flush metadata") + } + + mw.hardShutdownCancel() + <-mw.done +} + +func (mw *MetadataWatcher) loop() { + ticker := time.NewTicker(time.Duration(mw.interval)) + defer ticker.Stop() + defer close(mw.done) + + for { + select { + case <-mw.softShutdownCtx.Done(): + return + case <-ticker.C: + mw.collect() + } + } +} + +func (mw *MetadataWatcher) collect() { + if !mw.ready() { + return + } + + // We create a set of the metadata to help deduplicating based on the attributes of a + // scrape.MetricMetadata. In this case, a combination of metric name, help, type, and unit. + metadataSet := map[scrape.MetricMetadata]struct{}{} + metadata := []scrape.MetricMetadata{} + for _, tset := range mw.manager.TargetsActive() { + for _, target := range tset { + for _, entry := range target.MetadataList() { + if _, ok := metadataSet[entry]; !ok { + metadata = append(metadata, entry) + metadataSet[entry] = struct{}{} + } + } + } + } + + // Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired. 
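The new metadata_watcher.go above polls the scrape manager's active targets on an interval, deduplicates their metric metadata, and hands each batch to a MetadataAppender; Stop gives the final flush a deadline before cancelling outright. Purely as an illustration of the moving parts (Prometheus wires this up inside its remote-write storage; the logging appender and the nil manager getter below are assumptions for the sketch, and a nil getter falls back to the no-op manager shown above, so no metadata is actually collected):

package main

import (
	"context"
	"os"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage/remote"
)

// printAppender is a toy MetadataAppender that just reports batch sizes.
type printAppender struct{ logger log.Logger }

func (a *printAppender) AppendMetadata(_ context.Context, md []scrape.MetricMetadata) {
	a.logger.Log("msg", "received metadata batch", "entries", len(md))
}

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	mw := remote.NewMetadataWatcher(
		logger,
		nil,                            // ReadyScrapeManager; nil uses the no-op getter
		"example-remote",               // name of the remote endpoint, illustrative
		&printAppender{logger: logger}, // destination for collected metadata
		model.Duration(30*time.Second), // collection interval
		10*time.Second,                 // deadline for the final flush on Stop
	)
	mw.Start()
	time.Sleep(5 * time.Second)
	mw.Stop()
}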
+ mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata) +} + +func (mw *MetadataWatcher) ready() bool { + if mw.manager != nil { + return true + } + + m, err := mw.managerGetter.Get() + if err != nil { + return false + } + + mw.manager = m + return true +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 1a79d6c0ce96c..66d2e67b9be89 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -33,6 +33,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wal" ) @@ -50,20 +51,25 @@ const ( type queueManagerMetrics struct { reg prometheus.Registerer - succeededSamplesTotal prometheus.Counter - failedSamplesTotal prometheus.Counter - retriedSamplesTotal prometheus.Counter - droppedSamplesTotal prometheus.Counter - enqueueRetriesTotal prometheus.Counter - sentBatchDuration prometheus.Histogram - highestSentTimestamp *maxGauge - pendingSamples prometheus.Gauge - shardCapacity prometheus.Gauge - numShards prometheus.Gauge - maxNumShards prometheus.Gauge - minNumShards prometheus.Gauge - desiredNumShards prometheus.Gauge - bytesSent prometheus.Counter + samplesTotal prometheus.Counter + metadataTotal prometheus.Counter + failedSamplesTotal prometheus.Counter + failedMetadataTotal prometheus.Counter + retriedSamplesTotal prometheus.Counter + retriedMetadataTotal prometheus.Counter + droppedSamplesTotal prometheus.Counter + enqueueRetriesTotal prometheus.Counter + sentBatchDuration prometheus.Histogram + highestSentTimestamp *maxTimestamp + pendingSamples prometheus.Gauge + shardCapacity prometheus.Gauge + numShards prometheus.Gauge + maxNumShards prometheus.Gauge + minNumShards prometheus.Gauge + desiredNumShards prometheus.Gauge + samplesBytesTotal prometheus.Counter + metadataBytesTotal prometheus.Counter + maxSamplesPerSend prometheus.Gauge } func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics { @@ -75,31 +81,52 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager endpoint: e, } - m.succeededSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + m.samplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, - Name: "succeeded_samples_total", - Help: "Total number of samples successfully sent to remote storage.", + Name: "samples_total", + Help: "Total number of samples sent to remote storage.", + ConstLabels: constLabels, + }) + m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_total", + Help: "Total number of metadata entries sent to remote storage.", ConstLabels: constLabels, }) m.failedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, - Name: "failed_samples_total", + Name: "samples_failed_total", Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) + m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_failed_total", + Help: "Total number of 
metadata entries which failed on send to remote storage, non-recoverable errors.", + ConstLabels: constLabels, + }) m.retriedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, - Name: "retried_samples_total", + Name: "samples_retried_total", Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) + m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_retried_total", + Help: "Total number of metadata entries which failed on send to remote storage but were retried because the send error was recoverable.", + ConstLabels: constLabels, + }) m.droppedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, - Name: "dropped_samples_total", + Name: "samples_dropped_total", Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write.", ConstLabels: constLabels, }) @@ -114,11 +141,11 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Namespace: namespace, Subsystem: subsystem, Name: "sent_batch_duration_seconds", - Help: "Duration of sample batch send calls to the remote storage.", + Help: "Duration of send calls to the remote storage.", Buckets: append(prometheus.DefBuckets, 25, 60, 120, 300), ConstLabels: constLabels, }) - m.highestSentTimestamp = &maxGauge{ + m.highestSentTimestamp = &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, @@ -130,7 +157,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager m.pendingSamples = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, - Name: "pending_samples", + Name: "samples_pending", Help: "The number of samples pending in the queues shards to be sent to the remote storage.", ConstLabels: constLabels, }) @@ -169,11 +196,25 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. 
samples out.", ConstLabels: constLabels, }) - m.bytesSent = prometheus.NewCounter(prometheus.CounterOpts{ + m.samplesBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_bytes_total", + Help: "The total number of bytes of samples sent by the queue after compression.", + ConstLabels: constLabels, + }) + m.metadataBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_bytes_total", + Help: "The total number of bytes of metadata sent by the queue after compression.", + ConstLabels: constLabels, + }) + m.maxSamplesPerSend = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, - Name: "sent_bytes_total", - Help: "The total number of bytes sent by the queue.", + Name: "max_samples_per_send", + Help: "The maximum number of samples to be sent, in a single request, to the remote storage.", ConstLabels: constLabels, }) @@ -183,9 +224,12 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager func (m *queueManagerMetrics) register() { if m.reg != nil { m.reg.MustRegister( - m.succeededSamplesTotal, + m.samplesTotal, + m.metadataTotal, m.failedSamplesTotal, + m.failedMetadataTotal, m.retriedSamplesTotal, + m.retriedMetadataTotal, m.droppedSamplesTotal, m.enqueueRetriesTotal, m.sentBatchDuration, @@ -196,16 +240,21 @@ func (m *queueManagerMetrics) register() { m.maxNumShards, m.minNumShards, m.desiredNumShards, - m.bytesSent, + m.samplesBytesTotal, + m.metadataBytesTotal, + m.maxSamplesPerSend, ) } } func (m *queueManagerMetrics) unregister() { if m.reg != nil { - m.reg.Unregister(m.succeededSamplesTotal) + m.reg.Unregister(m.samplesTotal) + m.reg.Unregister(m.metadataTotal) m.reg.Unregister(m.failedSamplesTotal) + m.reg.Unregister(m.failedMetadataTotal) m.reg.Unregister(m.retriedSamplesTotal) + m.reg.Unregister(m.retriedMetadataTotal) m.reg.Unregister(m.droppedSamplesTotal) m.reg.Unregister(m.enqueueRetriesTotal) m.reg.Unregister(m.sentBatchDuration) @@ -216,7 +265,9 @@ func (m *queueManagerMetrics) unregister() { m.reg.Unregister(m.maxNumShards) m.reg.Unregister(m.minNumShards) m.reg.Unregister(m.desiredNumShards) - m.reg.Unregister(m.bytesSent) + m.reg.Unregister(m.samplesBytesTotal) + m.reg.Unregister(m.metadataBytesTotal) + m.reg.Unregister(m.maxSamplesPerSend) } } @@ -237,12 +288,14 @@ type WriteClient interface { type QueueManager struct { lastSendTimestamp atomic.Int64 - logger log.Logger - flushDeadline time.Duration - cfg config.QueueConfig - externalLabels labels.Labels - relabelConfigs []*relabel.Config - watcher *wal.Watcher + logger log.Logger + flushDeadline time.Duration + cfg config.QueueConfig + mcfg config.MetadataConfig + externalLabels labels.Labels + relabelConfigs []*relabel.Config + watcher *wal.Watcher + metadataWatcher *MetadataWatcher clientMtx sync.RWMutex storeClient WriteClient @@ -262,7 +315,7 @@ type QueueManager struct { metrics *queueManagerMetrics interner *pool - highestRecvTimestamp *maxGauge + highestRecvTimestamp *maxTimestamp } // NewQueueManager builds a new QueueManager. 
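Besides gaining metadata counterparts, the existing queue-manager metrics above are renamed, so dashboards and alerts written against the old series need to follow. Assuming the usual prometheus_remote_storage_ prefix (the namespace and subsystem constants sit outside this hunk), the mapping visible in these hunks is:

* succeeded_samples_total -> samples_total
* failed_samples_total -> samples_failed_total
* retried_samples_total -> samples_retried_total
* dropped_samples_total -> samples_dropped_total
* pending_samples -> samples_pending
* sent_bytes_total -> samples_bytes_total

New series are added alongside them for metadata (metadata_total, metadata_failed_total, metadata_retried_total, metadata_bytes_total) and for the configured max_samples_per_send.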
@@ -274,12 +327,14 @@ func NewQueueManager( walDir string, samplesIn *ewmaRate, cfg config.QueueConfig, + mCfg config.MetadataConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client WriteClient, flushDeadline time.Duration, interner *pool, - highestRecvTimestamp *maxGauge, + highestRecvTimestamp *maxTimestamp, + sm ReadyScrapeManager, ) *QueueManager { if logger == nil { logger = log.NewNopLogger() @@ -290,6 +345,7 @@ func NewQueueManager( logger: logger, flushDeadline: flushDeadline, cfg: cfg, + mcfg: mCfg, externalLabels: externalLabels, relabelConfigs: relabelConfigs, storeClient: client, @@ -313,11 +369,77 @@ func NewQueueManager( } t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, walDir) + if t.mcfg.Send { + t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline) + } t.shards = t.newShards() return t } +// AppendMetadata sends metadata the remote storage. Metadata is sent all at once and is not parallelized. +func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) { + mm := make([]prompb.MetricMetadata, 0, len(metadata)) + for _, entry := range metadata { + mm = append(mm, prompb.MetricMetadata{ + MetricFamilyName: entry.Metric, + Help: entry.Help, + Type: metricTypeToMetricTypeProto(entry.Type), + Unit: entry.Unit, + }) + } + + err := t.sendMetadataWithBackoff(ctx, mm) + + if err != nil { + t.metrics.failedMetadataTotal.Add(float64(len(metadata))) + level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", len(metadata), "err", err) + } +} + +func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata) error { + // Build the WriteRequest with no samples. + req, _, err := buildWriteRequest(nil, metadata, nil) + if err != nil { + return err + } + + metadataCount := len(metadata) + + attemptStore := func(try int) error { + span, ctx := opentracing.StartSpanFromContext(ctx, "Remote Metadata Send Batch") + defer span.Finish() + + span.SetTag("metadata", metadataCount) + span.SetTag("try", try) + span.SetTag("remote_name", t.storeClient.Name()) + span.SetTag("remote_url", t.storeClient.Endpoint()) + + begin := time.Now() + err := t.storeClient.Store(ctx, req) + t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + + if err != nil { + span.LogKV("error", err) + ext.Error.Set(span, true) + return err + } + + return nil + } + + retry := func() { + t.metrics.retriedMetadataTotal.Add(float64(len(metadata))) + } + err = sendWriteRequestWithBackoff(ctx, t.cfg, t.client(), t.logger, req, attemptStore, retry) + if err != nil { + return err + } + t.metrics.metadataTotal.Add(float64(len(metadata))) + t.metrics.metadataBytesTotal.Add(float64(len(req))) + return nil +} + // Append queues a sample to be sent to the remote storage. Blocks until all samples are // enqueued on their shards or a shutdown signal is received. 
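AppendMetadata above converts scraped metadata into prompb.MetricMetadata entries and, via sendMetadataWithBackoff, ships them in a WriteRequest that carries no samples. A standalone sketch of that request shape follows; the metric family values are made up, the COUNTER enum constant is assumed to exist alongside the UNKNOWN one referenced earlier, and compression plus the actual HTTP send are left out:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	req := &prompb.WriteRequest{
		// Timeseries stays empty: this request carries metadata only.
		Metadata: []prompb.MetricMetadata{{
			MetricFamilyName: "http_requests_total", // illustrative
			Type:             prompb.MetricMetadata_COUNTER,
			Help:             "Total number of HTTP requests.",
		}},
	}

	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("marshalled request bytes:", len(data))
}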
func (t *QueueManager) Append(samples []record.RefSample) bool { @@ -372,9 +494,13 @@ func (t *QueueManager) Start() { t.metrics.maxNumShards.Set(float64(t.cfg.MaxShards)) t.metrics.minNumShards.Set(float64(t.cfg.MinShards)) t.metrics.desiredNumShards.Set(float64(t.cfg.MinShards)) + t.metrics.maxSamplesPerSend.Set(float64(t.cfg.MaxSamplesPerSend)) t.shards.start(t.numShards) t.watcher.Start() + if t.mcfg.Send { + t.metadataWatcher.Start() + } t.wg.Add(2) go t.updateShardsLoop() @@ -389,11 +515,14 @@ func (t *QueueManager) Stop() { close(t.quit) t.wg.Wait() - // Wait for all QueueManager routines to end before stopping shards and WAL watcher. This + // Wait for all QueueManager routines to end before stopping shards, metadata watcher, and WAL watcher. This // is to ensure we don't end up executing a reshard and shards.stop() at the same time, which // causes a closed channel panic. t.shards.stop() t.watcher.Stop() + if t.mcfg.Send { + t.metadataWatcher.Stop() + } // On shutdown, release the strings in the labels from the intern pool. t.seriesMtx.Lock() @@ -857,23 +986,22 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, b // sendSamples to the remote storage with backoff for recoverable errors. func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) error { - req, highest, err := buildWriteRequest(samples, *buf) + // Build the WriteRequest with no metadata. + req, highest, err := buildWriteRequest(samples, nil, *buf) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. return err } - backoff := s.qm.cfg.MinBackoff reqSize := len(*buf) sampleCount := len(samples) *buf = req - try := 0 // An anonymous function allows us to defer the completion of our per-try spans // without causing a memory leak, and it has the nice effect of not propagating any // parameters for sendSamplesWithBackoff/3. - attemptStore := func() error { + attemptStore := func(try int) error { span, ctx := opentracing.StartSpanFromContext(ctx, "Remote Send Batch") defer span.Finish() @@ -884,6 +1012,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti span.SetTag("remote_url", s.qm.storeClient.Endpoint()) begin := time.Now() + s.qm.metrics.samplesTotal.Add(float64(sampleCount)) err := s.qm.client().Store(ctx, *buf) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) @@ -896,6 +1025,23 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti return nil } + onRetry := func() { + s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount)) + } + + err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.client(), s.qm.logger, req, attemptStore, onRetry) + if err != nil { + return err + } + s.qm.metrics.samplesBytesTotal.Add(float64(reqSize)) + s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) + return nil +} + +func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, s WriteClient, l log.Logger, req []byte, attempt func(int) error, onRetry func()) error { + backoff := cfg.MinBackoff + try := 0 + for { select { case <-ctx.Done(): @@ -903,37 +1049,34 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti default: } - err = attemptStore() + err := attempt(try) - if err != nil { - // If the error is unrecoverable, we should not retry. 
- if _, ok := err.(RecoverableError); !ok { - return err - } + if err == nil { + return nil + } - // If we make it this far, we've encountered a recoverable error and will retry. - s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount)) - level.Warn(s.qm.logger).Log("msg", "Failed to send batch, retrying", "err", err) - time.Sleep(time.Duration(backoff)) - backoff = backoff * 2 + // If the error is unrecoverable, we should not retry. + if _, ok := err.(RecoverableError); !ok { + return err + } - if backoff > s.qm.cfg.MaxBackoff { - backoff = s.qm.cfg.MaxBackoff - } + // If we make it this far, we've encountered a recoverable error and will retry. + onRetry() + level.Debug(l).Log("msg", "failed to send batch, retrying", "err", err) - try++ - continue + time.Sleep(time.Duration(backoff)) + backoff = backoff * 2 + + if backoff > cfg.MaxBackoff { + backoff = cfg.MaxBackoff } - // Since we retry forever on recoverable errors, this needs to stay inside the loop. - s.qm.metrics.succeededSamplesTotal.Add(float64(sampleCount)) - s.qm.metrics.bytesSent.Add(float64(reqSize)) - s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) - return nil + try++ + continue } } -func buildWriteRequest(samples []prompb.TimeSeries, buf []byte) ([]byte, int64, error) { +func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, buf []byte) ([]byte, int64, error) { var highest int64 for _, ts := range samples { // At the moment we only ever append a TimeSeries with a single sample in it. @@ -941,8 +1084,10 @@ func buildWriteRequest(samples []prompb.TimeSeries, buf []byte) ([]byte, int64, highest = ts.Samples[0].Timestamp } } + req := &prompb.WriteRequest{ Timeseries: samples, + Metadata: metadata, } data, err := proto.Marshal(req) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go index 39822a6763f7b..4718b479786d1 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -17,6 +17,7 @@ import ( "context" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" ) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index 36d7c011456ee..2ca540ed3ea4a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -22,13 +22,14 @@ import ( "time" "github.com/go-kit/kit/log" - "gopkg.in/yaml.v2" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/logging" + "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" ) @@ -40,6 +41,10 @@ const ( endpoint = "url" ) +type ReadyScrapeManager interface { + Get() (*scrape.Manager, error) +} + // startTimeCallback is a callback func that return the oldest timestamp stored in a storage. type startTimeCallback func() (int64, error) @@ -57,7 +62,7 @@ type Storage struct { } // NewStorage returns a remote.Storage. 
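The refactor above pulls the retry policy out of sendSamplesWithBackoff into a shared sendWriteRequestWithBackoff: run the attempt, return immediately on success or on a non-recoverable error, otherwise call the retry hook, sleep, double the backoff up to MaxBackoff, and go around again until the context is cancelled. A self-contained sketch of that loop, using illustrative names rather than the vendored unexported function:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// recoverable marks errors worth retrying; it plays the role of the remote
// package's RecoverableError in this sketch.
type recoverable struct{ error }

func withBackoff(ctx context.Context, min, max time.Duration, attempt func(try int) error, onRetry func()) error {
	backoff := min
	for try := 0; ; try++ {
		if err := ctx.Err(); err != nil {
			return err
		}
		err := attempt(try)
		if err == nil {
			return nil
		}
		var r recoverable
		if !errors.As(err, &r) {
			return err // non-recoverable: give up immediately
		}
		onRetry()
		time.Sleep(backoff)
		if backoff *= 2; backoff > max {
			backoff = max
		}
	}
}

func main() {
	err := withBackoff(context.Background(), 50*time.Millisecond, time.Second,
		func(try int) error {
			if try < 3 {
				return recoverable{fmt.Errorf("transient failure on try %d", try)}
			}
			return nil
		},
		func() { fmt.Println("retrying...") },
	)
	fmt.Println("result:", err)
}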
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration) *Storage { +func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage { if l == nil { l = log.NewNopLogger() } @@ -66,7 +71,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal logger: logging.Dedupe(l, 1*time.Minute), localStartTimeCallback: stCallback, } - s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline) + s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm) return s } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 8b0bf7622f3a2..296ffbceff0af 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -22,6 +22,7 @@ import ( "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" @@ -51,13 +52,14 @@ type WriteStorage struct { samplesIn *ewmaRate flushDeadline time.Duration interner *pool + scraper ReadyScrapeManager // For timestampTracker. - highestTimestamp *maxGauge + highestTimestamp *maxTimestamp } // NewWriteStorage creates and runs a WriteStorage. -func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string, flushDeadline time.Duration) *WriteStorage { +func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage { if logger == nil { logger = log.NewNopLogger() } @@ -71,7 +73,8 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration), walDir: walDir, interner: newPool(), - highestTimestamp: &maxGauge{ + scraper: sm, + highestTimestamp: &maxTimestamp{ Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, @@ -154,12 +157,14 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { rws.walDir, rws.samplesIn, rwConf.QueueConfig, + rwConf.MetadataConfig, conf.GlobalConfig.ExternalLabels, rwConf.WriteRelabelConfigs, c, rws.flushDeadline, rws.interner, rws.highestTimestamp, + rws.scraper, ) // Keep track of which queues are new so we know which to start. newHashes = append(newHashes, hash) @@ -202,7 +207,7 @@ type timestampTracker struct { writeStorage *WriteStorage samples int64 highestTimestamp int64 - highestRecvTimestamp *maxGauge + highestRecvTimestamp *maxTimestamp } // Add implements storage.Appender. 
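The signature changes above mean remote.NewStorage and NewWriteStorage now expect a ReadyScrapeManager, so code embedding the remote-write storage has to update its call sites. A hedged sketch of an updated caller: passing nil relies on the nil fallback in metadata_watcher.go above and simply leaves scraped-metadata forwarding inert, and the WAL directory and flush deadline are placeholders:

package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	startTime := func() (int64, error) { return 0, nil } // oldest-timestamp callback

	rs := remote.NewStorage(
		log.NewNopLogger(),
		prometheus.NewRegistry(),
		startTime,
		"/tmp/wal",    // WAL directory, placeholder
		1*time.Minute, // flush deadline
		nil,           // ReadyScrapeManager: no scrape manager wired in
	)
	defer rs.Close()
}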
diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 0a31c4e54a4a3..10a9241c79641 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -17,20 +17,19 @@ import ( "bytes" "context" "fmt" + html_template "html/template" "math" "net/url" "regexp" "sort" "strings" - "time" - - html_template "html/template" text_template "text/template" + "time" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/strutil" ) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md index 844ab5b37c691..66d07bf3cc7af 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md +++ b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md @@ -33,7 +33,7 @@ - [BUGFIX] Don't panic and recover nicely when running out of disk space. - [BUGFIX] Correctly handle empty labels. - [BUGFIX] Don't crash on an unknown tombstone ref. - - [ENHANCEMENT] Re-add FromData function to create a chunk from bytes. It is used by Cortex and Thanos. + - [ENHANCEMENT] Re-add `FromData` function to create a chunk from bytes. It is used by Cortex and Thanos. - [ENHANCEMENT] Simplify mergedPostings.Seek. - [FEATURE] Added `currentSegment` metric for the current WAL segment it is being written to. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/README.md b/vendor/github.com/prometheus/prometheus/tsdb/README.md index 61f867088203c..248004b9d39e4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/README.md +++ b/vendor/github.com/prometheus/prometheus/tsdb/README.md @@ -2,7 +2,7 @@ [![GoDoc](https://godoc.org/github.com/prometheus/prometheus/tsdb?status.svg)](https://godoc.org/github.com/prometheus/prometheus/tsdb) -This repository contains the Prometheus storage layer that is used in its 2.x releases. +This directory contains the Prometheus storage layer that is used in its 2.x releases. A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/). @@ -11,3 +11,9 @@ Based on the Gorilla TSDB [white papers](http://www.vldb.org/pvldb/vol8/p1816-te Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/). See also the [format documentation](docs/format/README.md). 
+ +A series of blog posts explaining different components of TSDB: +* [The Head Block](https://ganeshvernekar.com/blog/prometheus-tsdb-the-head-block/) +* [WAL and Checkpoint](https://ganeshvernekar.com/blog/prometheus-tsdb-wal-and-checkpoint/) +* [Memory Mapping of Head Chunks from Disk](https://ganeshvernekar.com/blog/prometheus-tsdb-mmapping-head-chunks-from-disk/) +* [Persistent Block and its Index](https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/) \ No newline at end of file diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 0df30e846c35a..3ec2261971e6d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -26,6 +26,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -172,7 +173,7 @@ type BlockMetaCompaction struct { // ULIDs of all source head blocks that went into the block. Sources []ulid.ULID `json:"sources,omitempty"` // Indicates that during compaction it resulted in a block without any samples - // so it should be deleted on the next reload. + // so it should be deleted on the next reloadBlocks. Deletable bool `json:"deletable,omitempty"` // Short descriptions of the direct blocks that were used to create // this block. @@ -225,19 +226,14 @@ func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error return 0, err } - var merr tsdb_errors.MultiError n, err := f.Write(jsonMeta) if err != nil { - merr.Add(err) - merr.Add(f.Close()) - return 0, merr.Err() + return 0, tsdb_errors.NewMulti(err, f.Close()).Err() } // Force the kernel to persist the file on disk to avoid data loss if the host crashes. if err := f.Sync(); err != nil { - merr.Add(err) - merr.Add(f.Close()) - return 0, merr.Err() + return 0, tsdb_errors.NewMulti(err, f.Close()).Err() } if err := f.Close(); err != nil { return 0, err @@ -279,10 +275,7 @@ func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, er var closers []io.Closer defer func() { if err != nil { - var merr tsdb_errors.MultiError - merr.Add(err) - merr.Add(closeAll(closers)) - err = merr.Err() + err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err() } }() meta, sizeMeta, err := readMetaFile(dir) @@ -332,13 +325,11 @@ func (pb *Block) Close() error { pb.pendingReaders.Wait() - var merr tsdb_errors.MultiError - - merr.Add(pb.chunkr.Close()) - merr.Add(pb.indexr.Close()) - merr.Add(pb.tombstones.Close()) - - return merr.Err() + return tsdb_errors.NewMulti( + pb.chunkr.Close(), + pb.indexr.Close(), + pb.tombstones.Close(), + ).Err() } func (pb *Block) String() string { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index bb6184169281e..0cd05eb77fdc1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" ) // BlockWriter is a block writer that allows appending and flushing series to disk. 
@@ -67,7 +68,7 @@ func (w *BlockWriter) initHead() error { } w.chunkDir = chunkDir - h, err := NewHead(nil, w.logger, nil, w.blockSize, w.chunkDir, nil, DefaultStripeSize, nil) + h, err := NewHead(nil, w.logger, nil, w.blockSize, w.chunkDir, nil, chunks.DefaultWriteBufferSize, DefaultStripeSize, nil) if err != nil { return errors.Wrap(err, "tsdb.NewHead") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index fa67c1cf4c599..f52b5b932384a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -14,7 +14,6 @@ package chunkenc import ( - "fmt" "math" "sync" @@ -132,7 +131,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { c.b.count = 0 return c, nil } - return nil, errors.Errorf("invalid encoding %q", e) + return nil, errors.Errorf("invalid chunk encoding %q", e) } func (p *pool) Put(c Chunk) error { @@ -149,7 +148,7 @@ func (p *pool) Put(c Chunk) error { xc.b.count = 0 p.xor.Put(c) default: - return errors.Errorf("invalid encoding %q", c.Encoding()) + return errors.Errorf("invalid chunk encoding %q", c.Encoding()) } return nil } @@ -162,5 +161,5 @@ func FromData(e Encoding, d []byte) (Chunk, error) { case EncXOR: return &XORChunk{b: bstream{count: 0, stream: d}}, nil } - return nil, fmt.Errorf("unknown chunk encoding: %d", e) + return nil, errors.Errorf("invalid chunk encoding %q", e) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index 6f7ea09b44f29..11417c38cf1b9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -27,6 +27,7 @@ import ( "strconv" "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb/chunkenc" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -229,14 +230,13 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all } defer func() { if returnErr != nil { - var merr tsdb_errors.MultiError - merr.Add(returnErr) + errs := tsdb_errors.NewMulti(returnErr) if f != nil { - merr.Add(f.Close()) + errs.Add(f.Close()) } // Calling RemoveAll on a non-existent file does not return error. - merr.Add(os.RemoveAll(ptmp)) - returnErr = merr.Err() + errs.Add(os.RemoveAll(ptmp)) + returnErr = errs.Err() } }() if allocSize > 0 { @@ -462,16 +462,16 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) { } var ( - bs []ByteSlice - cs []io.Closer - merr tsdb_errors.MultiError + bs []ByteSlice + cs []io.Closer ) for _, fn := range files { f, err := fileutil.OpenMmapFile(fn) if err != nil { - merr.Add(errors.Wrap(err, "mmap files")) - merr.Add(closeAll(cs)) - return nil, merr + return nil, tsdb_errors.NewMulti( + errors.Wrap(err, "mmap files"), + tsdb_errors.CloseAll(cs), + ).Err() } cs = append(cs, f) bs = append(bs, realByteSlice(f.Bytes())) @@ -479,15 +479,16 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) { reader, err := newReader(bs, cs, pool) if err != nil { - merr.Add(err) - merr.Add(closeAll(cs)) - return nil, merr + return nil, tsdb_errors.NewMulti( + err, + tsdb_errors.CloseAll(cs), + ).Err() } return reader, nil } func (s *Reader) Close() error { - return closeAll(s.cs) + return tsdb_errors.CloseAll(s.cs) } // Size returns the size of the chunks. 
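The blockwriter.go hunk above shows the knock-on effect of the head-chunk mapper change: NewHead now takes the chunk write buffer size as an extra argument, and chunks.DefaultWriteBufferSize preserves the previous 4 MiB behaviour. For callers that construct a Head directly, the updated call looks roughly like this (the registry, logger and directory are placeholders, and the argument order follows the calls visible in this patch):

package example

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

func openHead(chunkDir string) (*tsdb.Head, error) {
	return tsdb.NewHead(
		prometheus.NewRegistry(),
		log.NewNopLogger(),
		nil,                           // no WAL in this sketch
		tsdb.DefaultBlockDuration,     // chunk range in milliseconds
		chunkDir,
		chunkenc.NewPool(),
		chunks.DefaultWriteBufferSize, // new argument: head chunk write buffer size
		tsdb.DefaultStripeSize,
		nil,                           // no SeriesLifecycleCallback
	)
}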
@@ -587,12 +588,3 @@ func sequenceFiles(dir string) ([]string, error) { } return res, nil } - -func closeAll(cs []io.Closer) error { - var merr tsdb_errors.MultiError - - for _, c := range cs { - merr.Add(c.Close()) - } - return merr.Err() -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index 632682218cf26..d82c12d33f04e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -27,10 +27,11 @@ import ( "sync" "github.com/pkg/errors" + "go.uber.org/atomic" + "github.com/prometheus/prometheus/tsdb/chunkenc" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" - "go.uber.org/atomic" ) // Head chunk file header fields constants. @@ -39,7 +40,6 @@ const ( MagicHeadChunks = 0x0130BC91 headChunksFormatV1 = 1 - writeBufferSize = 4 * 1024 * 1024 // 4 MiB. ) var ( @@ -62,6 +62,12 @@ const ( // MaxHeadChunkMetaSize is the max size of an mmapped chunks minus the chunks data. // Max because the uvarint size can be smaller. MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunksFormatVersionSize + MaxChunkLengthFieldSize + CRCSize + // MinWriteBufferSize is the minimum write buffer size allowed. + MinWriteBufferSize = 64 * 1024 // 64KB. + // MaxWriteBufferSize is the maximum write buffer size allowed. + MaxWriteBufferSize = 8 * 1024 * 1024 // 8 MiB. + // DefaultWriteBufferSize is the default write buffer size. + DefaultWriteBufferSize = 4 * 1024 * 1024 // 4 MiB. ) // corruptionErr is an error that's returned when corruption is encountered. @@ -81,7 +87,8 @@ type ChunkDiskMapper struct { curFileNumBytes atomic.Int64 // Bytes written in current open file. /// Writer. - dir *os.File + dir *os.File + writeBufferSize int curFile *os.File // File being written to. curFileSequence int // Index of current open file being appended to. @@ -104,10 +111,6 @@ type ChunkDiskMapper struct { // from which chunks are served till they are flushed and are ready for m-mapping. chunkBuffer *chunkBuffer - // The total size of bytes in the closed files. - // Needed to calculate the total size of all segments on disk. - size atomic.Int64 - // If 'true', it indicated that the maxt of all the on-disk files were set // after iterating through all the chunks in those files. fileMaxtSet bool @@ -124,7 +127,15 @@ type mmappedChunkFile struct { // using the default head chunk file duration. // NOTE: 'IterateAllChunks' method needs to be called at least once after creating ChunkDiskMapper // to set the maxt of all the file. -func NewChunkDiskMapper(dir string, pool chunkenc.Pool) (*ChunkDiskMapper, error) { +func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*ChunkDiskMapper, error) { + // Validate write buffer size. 
+ if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize { + return nil, errors.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxHeadChunkFileSize, writeBufferSize) + } + if writeBufferSize%1024 != 0 { + return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) + } + if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } @@ -134,10 +145,11 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool) (*ChunkDiskMapper, error } m := &ChunkDiskMapper{ - dir: dirFile, - pool: pool, - crc32: newCRC32(), - chunkBuffer: newChunkBuffer(), + dir: dirFile, + pool: pool, + writeBufferSize: writeBufferSize, + crc32: newCRC32(), + chunkBuffer: newChunkBuffer(), } if m.pool == nil { @@ -152,10 +164,7 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { cdm.closers = map[int]io.Closer{} defer func() { if returnErr != nil { - var merr tsdb_errors.MultiError - merr.Add(returnErr) - merr.Add(closeAllFromMap(cdm.closers)) - returnErr = merr.Err() + returnErr = tsdb_errors.NewMulti(returnErr, closeAllFromMap(cdm.closers)).Err() cdm.mmappedChunkFiles = nil cdm.closers = nil @@ -167,6 +176,11 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { return err } + files, err = repairLastChunkFile(files) + if err != nil { + return err + } + chkFileIndices := make([]int, 0, len(files)) for seq, fn := range files { f, err := fileutil.OpenMmapFile(fn) @@ -178,8 +192,6 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { chkFileIndices = append(chkFileIndices, seq) } - cdm.size.Store(int64(0)) - // Check for gaps in the files. sort.Ints(chkFileIndices) if len(chkFileIndices) == 0 { @@ -206,8 +218,6 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { if v := int(b.byteSlice.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 { return errors.Errorf("%s: invalid chunk format version %d", files[i], v) } - - cdm.size.Add(int64(b.byteSlice.Len())) } return nil @@ -226,9 +236,40 @@ func listChunkFiles(dir string) (map[int]string, error) { } res[int(seq)] = filepath.Join(dir, fi.Name()) } + return res, nil } +// repairLastChunkFile deletes the last file if it's empty. +// Because we don't fsync when creating these file, we could end +// up with an empty file at the end during an abrupt shutdown. +func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr error) { + lastFile := -1 + for seq := range files { + if seq > lastFile { + lastFile = seq + } + } + + if lastFile <= 0 { + return files, nil + } + + info, err := os.Stat(files[lastFile]) + if err != nil { + return files, errors.Wrap(err, "file stat during last head chunk file repair") + } + if info.Size() == 0 { + // Corrupt file, hence remove it. + if err := os.RemoveAll(files[lastFile]); err != nil { + return files, errors.Wrap(err, "delete corrupted, empty head chunk file during last file repair") + } + delete(files, lastFile) + } + + return files, nil +} + // WriteChunk writes the chunk to the disk. // The returned chunk ref is the reference from where the chunk encoding starts for the chunk. 
func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk chunkenc.Chunk) (chkRef uint64, err error) { @@ -247,7 +288,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk c // if len(chk.Bytes())+MaxHeadChunkMetaSize >= writeBufferSize, it means that chunk >= the buffer size; // so no need to flush here, as we have to flush at the end (to not keep partial chunks in buffer). - if len(chk.Bytes())+MaxHeadChunkMetaSize < writeBufferSize && cdm.chkWriter.Available() < MaxHeadChunkMetaSize+len(chk.Bytes()) { + if len(chk.Bytes())+MaxHeadChunkMetaSize < cdm.writeBufferSize && cdm.chkWriter.Available() < MaxHeadChunkMetaSize+len(chk.Bytes()) { if err := cdm.flushBuffer(); err != nil { return 0, err } @@ -287,7 +328,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk c cdm.chunkBuffer.put(chkRef, chk) - if len(chk.Bytes())+MaxHeadChunkMetaSize >= writeBufferSize { + if len(chk.Bytes())+MaxHeadChunkMetaSize >= cdm.writeBufferSize { // The chunk was bigger than the buffer itself. // Flushing to not keep partial chunks in buffer. if err := cdm.flushBuffer(); err != nil { @@ -333,14 +374,10 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) { // The file should not be closed if there is no error, // its kept open in the ChunkDiskMapper. if returnErr != nil { - var merr tsdb_errors.MultiError - merr.Add(returnErr) - merr.Add(newFile.Close()) - returnErr = merr.Err() + returnErr = tsdb_errors.NewMulti(returnErr, newFile.Close()).Err() } }() - cdm.size.Add(cdm.curFileSize()) cdm.curFileNumBytes.Store(int64(n)) if cdm.curFile != nil { @@ -360,7 +397,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) { if cdm.chkWriter != nil { cdm.chkWriter.Reset(newFile) } else { - cdm.chkWriter = bufio.NewWriterSize(newFile, writeBufferSize) + cdm.chkWriter = bufio.NewWriterSize(newFile, cdm.writeBufferSize) } cdm.closers[cdm.curFileSequence] = mmapFile @@ -680,13 +717,13 @@ func (cdm *ChunkDiskMapper) Truncate(mint int64) error { } cdm.readPathMtx.RUnlock() - var merr tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() // Cut a new file only if the current file has some chunks. if cdm.curFileSize() > HeadChunkFileHeaderSize { - merr.Add(cdm.CutNewFile()) + errs.Add(cdm.CutNewFile()) } - merr.Add(cdm.deleteFiles(removedFiles)) - return merr.Err() + errs.Add(cdm.deleteFiles(removedFiles)) + return errs.Err() } func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) error { @@ -696,7 +733,6 @@ func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) error { cdm.readPathMtx.Unlock() return err } - cdm.size.Sub(int64(cdm.mmappedChunkFiles[seq].byteSlice.Len())) delete(cdm.mmappedChunkFiles, seq) delete(cdm.closers, seq) } @@ -735,8 +771,8 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { } // Size returns the size of the chunk files. 
-func (cdm *ChunkDiskMapper) Size() int64 { - return cdm.size.Load() + cdm.curFileSize() +func (cdm *ChunkDiskMapper) Size() (int64, error) { + return fileutil.DirSize(cdm.dir.Name()) } func (cdm *ChunkDiskMapper) curFileSize() int64 { @@ -758,23 +794,23 @@ func (cdm *ChunkDiskMapper) Close() error { } cdm.closed = true - var merr tsdb_errors.MultiError - merr.Add(closeAllFromMap(cdm.closers)) - merr.Add(cdm.finalizeCurFile()) - merr.Add(cdm.dir.Close()) - + errs := tsdb_errors.NewMulti( + closeAllFromMap(cdm.closers), + cdm.finalizeCurFile(), + cdm.dir.Close(), + ) cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{} cdm.closers = map[int]io.Closer{} - return merr.Err() + return errs.Err() } func closeAllFromMap(cs map[int]io.Closer) error { - var merr tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for _, c := range cs { - merr.Add(c.Close()) + errs.Add(c.Close()) } - return merr.Err() + return errs.Err() } const inBufferShards = 128 // 128 is a randomly chosen number. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 74f54fdb9c818..7c6e142166f71 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -29,6 +29,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -329,7 +330,9 @@ func splitByRange(ds []dirMeta, tr int64) [][]dirMeta { return splitDirs } -func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { +// CompactBlockMetas merges many block metas into one, combining it's source blocks together +// and adjusting compaction level. +func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { res := &BlockMeta{ ULID: uid, MinTime: blocks[0].MinTime, @@ -414,7 +417,7 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u uid = ulid.MustNew(ulid.Now(), rand.Reader) - meta := compactBlockMetas(uid, metas...) + meta := CompactBlockMetas(uid, metas...) err = c.write(dest, meta, blocks...) if err == nil { if meta.Stats.NumSamples == 0 { @@ -450,17 +453,16 @@ func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (u return uid, nil } - var merr tsdb_errors.MultiError - merr.Add(err) + errs := tsdb_errors.NewMulti(err) if err != context.Canceled { for _, b := range bs { if err := b.setCompactionFailed(); err != nil { - merr.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir())) + errs.Add(errors.Wrapf(err, "setting compaction failed for block: %s", b.Dir())) } } } - return uid, merr + return uid, errs.Err() } func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { @@ -488,6 +490,12 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, p } if meta.Stats.NumSamples == 0 { + level.Info(c.logger).Log( + "msg", "write block resulted in empty block", + "mint", meta.MinTime, + "maxt", meta.MaxTime, + "duration", time.Since(start), + ) return ulid.ULID{}, nil } @@ -521,16 +529,12 @@ func (w *instrumentedChunkWriter) WriteChunks(chunks ...chunks.Meta) error { } // write creates a new block that is the union of the provided blocks into dir. -// It cleans up all files of the old blocks after completing successfully. 
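Two ChunkDiskMapper API changes land above: NewChunkDiskMapper takes the write buffer size explicitly and rejects values outside [MinWriteBufferSize, MaxWriteBufferSize] or not a multiple of 1024, and Size now measures the chunk directory on disk and therefore returns an error as well. A small construction sketch (the directory is a placeholder, and as the constructor's comment notes a real caller still has to run IterateAllChunks once before relying on the mapper):

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

func openMapper(dir string) error {
	cdm, err := chunks.NewChunkDiskMapper(dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize)
	if err != nil {
		return err // e.g. a buffer size outside the allowed bounds
	}
	defer cdm.Close()

	// Size now stats the chunk directory and can fail, hence the extra return value.
	sz, err := cdm.Size()
	if err != nil {
		return err
	}
	fmt.Println("head chunk files on disk:", sz, "bytes")
	return nil
}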
func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockReader) (err error) { dir := filepath.Join(dest, meta.ULID.String()) tmp := dir + tmpForCreationBlockDirSuffix var closers []io.Closer defer func(t time.Time) { - var merr tsdb_errors.MultiError - merr.Add(err) - merr.Add(closeAll(closers)) - err = merr.Err() + err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(closers)).Err() // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. if err := os.RemoveAll(tmp); err != nil { @@ -587,13 +591,13 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe // though these are covered under defer. This is because in Windows, // you cannot delete these unless they are closed and the defer is to // make sure they are closed if the function exits due to an error above. - var merr tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for _, w := range closers { - merr.Add(w.Close()) + errs.Add(w.Close()) } closers = closers[:0] // Avoid closing the writers twice in the defer. - if merr.Err() != nil { - return merr.Err() + if errs.Err() != nil { + return errs.Err() } // Populated block is empty, so exit early. @@ -630,7 +634,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe } df = nil - // Block successfully written, make visible and remove old ones. + // Block successfully written, make it visible in destination dir by moving it from tmp one. if err := fileutil.Replace(tmp, dir); err != nil { return errors.Wrap(err, "rename block dir") } @@ -653,12 +657,11 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, overlapping bool ) defer func() { - var merr tsdb_errors.MultiError - merr.Add(err) - if cerr := closeAll(closers); cerr != nil { - merr.Add(errors.Wrap(cerr, "close")) + errs := tsdb_errors.NewMulti(err) + if cerr := tsdb_errors.CloseAll(closers); cerr != nil { + errs.Add(errors.Wrap(cerr, "close")) } - err = merr.Err() + err = errs.Err() c.metrics.populatingBlocks.Set(0) }() c.metrics.populatingBlocks.Set(1) @@ -713,7 +716,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, symbols = syms continue } - symbols = newMergedStringIter(symbols, syms) + symbols = NewMergedStringIter(symbols, syms) } for symbols.Next() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 4c70a40527730..f63437fc6ae4e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -34,16 +34,16 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" + _ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minium Go version is met. "github.com/prometheus/prometheus/tsdb/wal" - "golang.org/x/sync/errgroup" - - // Load the package into main to make sure minium Go version is met. - _ "github.com/prometheus/prometheus/tsdb/goversion" ) const ( @@ -68,14 +68,15 @@ var ( // millisecond precision timestamps. 
func DefaultOptions() *Options { return &Options{ - WALSegmentSize: wal.DefaultSegmentSize, - RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), - MinBlockDuration: DefaultBlockDuration, - MaxBlockDuration: DefaultBlockDuration, - NoLockfile: false, - AllowOverlappingBlocks: false, - WALCompression: false, - StripeSize: DefaultStripeSize, + WALSegmentSize: wal.DefaultSegmentSize, + RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), + MinBlockDuration: DefaultBlockDuration, + MaxBlockDuration: DefaultBlockDuration, + NoLockfile: false, + AllowOverlappingBlocks: false, + WALCompression: false, + StripeSize: DefaultStripeSize, + HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, } } @@ -123,6 +124,9 @@ type Options struct { // Typically it is in milliseconds. MaxBlockDuration int64 + // HeadChunksWriteBufferSize configures the write buffer size used by the head chunks mapper. + HeadChunksWriteBufferSize int + // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. // It is always a no-op in Prometheus and mainly meant for external users who import TSDB. SeriesLifecycleCallback SeriesLifecycleCallback @@ -199,7 +203,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { }) m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_symbol_table_size_bytes", - Help: "Size of symbol table on disk (in bytes)", + Help: "Size of symbol table in memory for loaded blocks", }, func() float64 { db.mtx.RLock() blocks := db.blocks[:] @@ -216,7 +220,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { }) m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_reloads_failures_total", - Help: "Number of times the database failed to reload block data from disk.", + Help: "Number of times the database failed to reloadBlocks block data from disk.", }) m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_compactions_triggered_total", @@ -329,15 +333,15 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { if err != nil { return err } - head, err := NewHead(nil, db.logger, w, DefaultBlockDuration, db.dir, nil, DefaultStripeSize, nil) + head, err := NewHead(nil, db.logger, w, DefaultBlockDuration, db.dir, nil, chunks.DefaultWriteBufferSize, DefaultStripeSize, nil) if err != nil { return err } defer func() { - var merr tsdb_errors.MultiError - merr.Add(returnErr) - merr.Add(errors.Wrap(head.Close(), "closing Head")) - returnErr = merr.Err() + returnErr = tsdb_errors.NewMulti( + returnErr, + errors.Wrap(head.Close(), "closing Head"), + ).Err() }() // Set the min valid time for the ingested wal samples // to be no lower than the maxt of the last block. 
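The db.go hunks above surface the same knob at the Options level: HeadChunksWriteBufferSize defaults to chunks.DefaultWriteBufferSize and non-positive values are normalised in validateOpts. Overriding it when opening a database would look something like the sketch below; the 1 MiB value is arbitrary (it only has to respect the chunks package bounds and 1 KiB granularity), and the four-argument tsdb.Open used here is the signature this vendored version exposes:

package example

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
)

func openDB(dir string) (*tsdb.DB, error) {
	opts := tsdb.DefaultOptions()
	opts.HeadChunksWriteBufferSize = 1 * 1024 * 1024 // 1 MiB instead of the 4 MiB default

	return tsdb.Open(dir, log.NewNopLogger(), prometheus.NewRegistry(), opts)
}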
@@ -382,7 +386,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue blocks[i] = b } - head, err := NewHead(nil, db.logger, nil, DefaultBlockDuration, db.dir, nil, DefaultStripeSize, nil) + head, err := NewHead(nil, db.logger, nil, DefaultBlockDuration, db.dir, nil, chunks.DefaultWriteBufferSize, DefaultStripeSize, nil) if err != nil { return nil, err } @@ -400,7 +404,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue if err != nil { return nil, err } - head, err = NewHead(nil, db.logger, w, DefaultBlockDuration, db.dir, nil, DefaultStripeSize, nil) + head, err = NewHead(nil, db.logger, w, DefaultBlockDuration, db.dir, nil, chunks.DefaultWriteBufferSize, DefaultStripeSize, nil) if err != nil { return nil, err } @@ -467,11 +471,11 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b) } } - var merr tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) } - return nil, merr.Err() + return nil, errs.Err() } if len(loadable) == 0 { @@ -515,12 +519,7 @@ func (db *DBReadOnly) Close() error { } close(db.closed) - var merr tsdb_errors.MultiError - - for _, b := range db.closers { - merr.Add(b.Close()) - } - return merr.Err() + return tsdb_errors.CloseAll(db.closers) } // Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used. @@ -537,7 +536,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.StripeSize <= 0 { opts.StripeSize = DefaultStripeSize } - + if opts.HeadChunksWriteBufferSize <= 0 { + opts.HeadChunksWriteBufferSize = chunks.DefaultWriteBufferSize + } if opts.MinBlockDuration <= 0 { opts.MinBlockDuration = DefaultBlockDuration } @@ -553,7 +554,7 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { return opts, rngs } -func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64) (db *DB, err error) { +func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64) (_ *DB, returnedErr error) { if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } @@ -584,7 +585,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return nil, errors.Wrap(err, "remove tmp dirs") } - db = &DB{ + db := &DB{ dir: dir, logger: l, opts: opts, @@ -595,6 +596,20 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs chunkPool: chunkenc.NewPool(), blocksToDelete: opts.BlocksToDelete, } + defer func() { + // Close files if startup fails somewhere. + if returnedErr == nil { + return + } + + close(db.donec) // DB is never run if it was an error, so close this channel here. 
+ + returnedErr = tsdb_errors.NewMulti( + returnedErr, + errors.Wrap(db.Close(), "close DB after failed startup"), + ).Err() + }() + if db.blocksToDelete == nil { db.blocksToDelete = DefaultBlocksToDelete(db) } @@ -611,6 +626,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.lockf = lockf } + var err error ctx, cancel := context.WithCancel(context.Background()) db.compactor, err = NewLeveledCompactor(ctx, r, l, rngs, db.chunkPool) if err != nil { @@ -633,7 +649,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } } - db.head, err = NewHead(r, l, wlog, rngs[0], dir, db.chunkPool, opts.StripeSize, opts.SeriesLifecycleCallback) + db.head, err = NewHead(r, l, wlog, rngs[0], dir, db.chunkPool, opts.HeadChunksWriteBufferSize, opts.StripeSize, opts.SeriesLifecycleCallback) if err != nil { return nil, err } @@ -769,20 +785,26 @@ func (a dbAppender) Commit() error { } // Compact data if possible. After successful compaction blocks are reloaded -// which will also trigger blocks to be deleted that fall out of the retention -// window. -// If no blocks are compacted, the retention window state doesn't change. Thus, -// this is sufficient to reliably delete old data. -// Old blocks are only deleted on reload based on the new block's parent information. -// See DB.reload documentation for further information. -func (db *DB) Compact() (err error) { +// which will also delete the blocks that fall out of the retention window. +// Old blocks are only deleted on reloadBlocks based on the new block's parent information. +// See DB.reloadBlocks documentation for further information. +func (db *DB) Compact() (returnErr error) { db.cmtx.Lock() defer db.cmtx.Unlock() defer func() { - if err != nil { + if returnErr != nil { db.metrics.compactionsFailed.Inc() } }() + + lastBlockMaxt := int64(math.MinInt64) + defer func() { + returnErr = tsdb_errors.NewMulti( + returnErr, + errors.Wrap(db.head.truncateWAL(lastBlockMaxt), "WAL truncation in Compact defer"), + ).Err() + }() + // Check whether we have pending head blocks that are ready to be persisted. // They have the highest priority. for { @@ -804,55 +826,59 @@ func (db *DB) Compact() (err error) { // so in order to make sure that overlaps are evaluated // consistently, we explicitly remove the last value // from the block interval here. - head := NewRangeHead(db.head, mint, maxt-1) - if err := db.compactHead(head); err != nil { - return err + if err := db.compactHead(NewRangeHead(db.head, mint, maxt-1)); err != nil { + return errors.Wrap(err, "compact head") } + // Consider only successful compactions for WAL truncation. + lastBlockMaxt = maxt + } + + // Clear some disk space before compacting blocks, especially important + // when Head compaction happened over a long time range. + if err := db.head.truncateWAL(lastBlockMaxt); err != nil { + return errors.Wrap(err, "WAL truncation in Compact") } return db.compactBlocks() } -// CompactHead compacts the given the RangeHead. -func (db *DB) CompactHead(head *RangeHead) (err error) { +// CompactHead compacts the given RangeHead. +func (db *DB) CompactHead(head *RangeHead) error { db.cmtx.Lock() defer db.cmtx.Unlock() - return db.compactHead(head) + if err := db.compactHead(head); err != nil { + return errors.Wrap(err, "compact head") + } + + if err := db.head.truncateWAL(head.BlockMaxTime()); err != nil { + return errors.Wrap(err, "WAL truncation") + } + return nil } -// compactHead compacts the given the RangeHead. 
+// compactHead compacts the given RangeHead. // The compaction mutex should be held before calling this method. -func (db *DB) compactHead(head *RangeHead) (err error) { - // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). - // Because of this block intervals are always +1 than the total samples it includes. - maxt := head.MaxTime() + 1 - uid, err := db.compactor.Write(db.dir, head, head.MinTime(), maxt, nil) +func (db *DB) compactHead(head *RangeHead) error { + uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { return errors.Wrap(err, "persist head block") } runtime.GC() - - if err := db.reload(); err != nil { + if err := db.reloadBlocks(); err != nil { if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { - var merr tsdb_errors.MultiError - merr.Add(errors.Wrap(err, "reload blocks")) - merr.Add(errors.Wrapf(errRemoveAll, "delete persisted head block after failed db reload:%s", uid)) - return merr.Err() + return tsdb_errors.NewMulti( + errors.Wrap(err, "reloadBlocks blocks"), + errors.Wrapf(errRemoveAll, "delete persisted head block after failed db reloadBlocks:%s", uid), + ).Err() } - return errors.Wrap(err, "reload blocks") + return errors.Wrap(err, "reloadBlocks blocks") } - if (uid == ulid.ULID{}) { - // Compaction resulted in an empty block. - // Head truncating during db.reload() depends on the persisted blocks and - // in this case no new block will be persisted so manually truncate the head. - if err = db.head.Truncate(maxt); err != nil { - return errors.Wrap(err, "head truncate failed (in compact)") - } + if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { + return errors.Wrap(err, "head memory truncate") } runtime.GC() - return nil } @@ -881,11 +907,11 @@ func (db *DB) compactBlocks() (err error) { } runtime.GC() - if err := db.reload(); err != nil { + if err := db.reloadBlocks(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { - return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid) + return errors.Wrapf(err, "delete compacted block after failed db reloadBlocks:%s", uid) } - return errors.Wrap(err, "reload blocks") + return errors.Wrap(err, "reloadBlocks blocks") } runtime.GC() } @@ -904,9 +930,23 @@ func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) { return nil, false } -// reload blocks and trigger head truncation if new blocks appeared. +// reload reloads blocks and truncates the head and its WAL. +func (db *DB) reload() error { + if err := db.reloadBlocks(); err != nil { + return errors.Wrap(err, "reloadBlocks") + } + if len(db.blocks) == 0 { + return nil + } + if err := db.head.Truncate(db.blocks[len(db.blocks)-1].MaxTime()); err != nil { + return errors.Wrap(err, "head truncate") + } + return nil +} + +// reloadBlocks reloads blocks without touching head. // Blocks that are obsolete due to replacement or retention will be deleted. 
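Editor sketch (not part of the patch): the Compact/CompactHead rework and the reload/reloadBlocks split above decouple WAL and head-memory truncation from block reloading; CompactHead now truncates the WAL itself up to the block's max time. A hedged example of an embedding application driving it, using the half-open range convention noted above; the function and variable names are illustrative.

    package sketch

    import (
        "github.com/pkg/errors"
        "github.com/prometheus/prometheus/tsdb"
    )

    // flushRange persists [mint, maxt) from the head as a block.
    func flushRange(db *tsdb.DB, mint, maxt int64) error {
        // Block intervals are half-open, so the RangeHead upper bound is maxt-1;
        // CompactHead adds the +1 back via RangeHead.BlockMaxTime and then
        // truncates the WAL and head memory up to that time.
        rh := tsdb.NewRangeHead(db.Head(), mint, maxt-1)
        if err := db.CompactHead(rh); err != nil {
            return errors.Wrap(err, "compact head range")
        }
        return nil
    }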
-func (db *DB) reload() (err error) { +func (db *DB) reloadBlocks() (err error) { defer func() { if err != nil { db.metrics.reloadsFailed.Inc() @@ -945,11 +985,11 @@ func (db *DB) reload() (err error) { block.Close() } } - var merr tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for ulid, err := range corrupted { - merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) + errs.Add(errors.Wrapf(err, "corrupted block %s", ulid.String())) } - return merr.Err() + return errs.Err() } var ( @@ -989,7 +1029,7 @@ func (db *DB) reload() (err error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reload", "detail", overlaps.String()) + level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) } // Append blocks to old, deletable blocks, so we can close them. @@ -999,15 +1039,9 @@ func (db *DB) reload() (err error) { } } if err := db.deleteBlocks(deletable); err != nil { - return err + return errors.Wrapf(err, "delete %v blocks", len(deletable)) } - - // Garbage collect data in the head if the most recent persisted block - // covers data of its current time range. - if len(toLoad) == 0 { - return nil - } - return errors.Wrap(db.head.Truncate(toLoad[len(toLoad)-1].Meta().MaxTime), "head truncate failed") + return nil } func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { @@ -1020,7 +1054,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po for _, bDir := range bDirs { meta, _, err := readMetaFile(bDir) if err != nil { - level.Error(l).Log("msg", "Failed to read meta.json for a block during reload. Skipping", "dir", bDir, "err", err) + level.Error(l).Log("msg", "Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) continue } @@ -1077,7 +1111,7 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} { // set in the db options. func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) { // Time retention is disabled or no blocks to work with. - if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 { + if len(blocks) == 0 || db.opts.RetentionDuration == 0 { return } @@ -1100,7 +1134,7 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc // set in the db options. func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) { // Size retention is disabled or no blocks to work with. - if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 { + if len(blocks) == 0 || db.opts.MaxBytes <= 0 { return } @@ -1295,7 +1329,9 @@ func (db *DB) Head() *Head { // Close the partition. func (db *DB) Close() error { close(db.stopc) - db.compactCancel() + if db.compactCancel != nil { + db.compactCancel() + } <-db.donec db.mtx.Lock() @@ -1308,15 +1344,14 @@ func (db *DB) Close() error { g.Go(pb.Close) } - var merr tsdb_errors.MultiError - - merr.Add(g.Wait()) - + errs := tsdb_errors.NewMulti(g.Wait()) if db.lockf != nil { - merr.Add(db.lockf.Release()) + errs.Add(db.lockf.Release()) + } + if db.head != nil { + errs.Add(db.head.Close()) } - merr.Add(db.head.Close()) - return merr.Err() + return errs.Err() } // DisableCompactions disables auto compactions. 
@@ -1502,7 +1537,11 @@ func (db *DB) CleanTombstones() (err error) { newUIDs = append(newUIDs, *uid) } } - return errors.Wrap(db.reload(), "reload blocks") + + if err := db.reloadBlocks(); err != nil { + return errors.Wrap(err, "reload blocks") + } + return nil } func isBlockDir(fi os.FileInfo) bool { @@ -1577,15 +1616,6 @@ func nextSequenceFile(dir string) (string, int, error) { return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil } -func closeAll(cs []io.Closer) error { - var merr tsdb_errors.MultiError - - for _, c := range cs { - merr.Add(c.Close()) - } - return merr.Err() -} - func exponential(d, min, max time.Duration) time.Duration { d *= 2 if d < min { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go index 69d36624800f4..aeac4d27716d8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go @@ -17,21 +17,59 @@ package errors import ( "bytes" "fmt" + "io" ) -// The MultiError type implements the error interface, and contains the -// Errors used to construct it. -type MultiError []error +// multiError type allows combining multiple errors into one. +type multiError []error -// Returns a concatenated string of the contained errors -func (es MultiError) Error() string { +// NewMulti returns multiError with provided errors added if not nil. +func NewMulti(errs ...error) multiError { // nolint:golint + m := multiError{} + m.Add(errs...) + return m +} + +// Add adds single or many errors to the error list. Each error is added only if not nil. +// If the error is a nonNilMultiError type, the errors inside nonNilMultiError are added to the main multiError. +func (es *multiError) Add(errs ...error) { + for _, err := range errs { + if err == nil { + continue + } + if merr, ok := err.(nonNilMultiError); ok { + *es = append(*es, merr.errs...) + continue + } + *es = append(*es, err) + } +} + +// Err returns the error list as an error or nil if it is empty. +func (es multiError) Err() error { + if len(es) == 0 { + return nil + } + return nonNilMultiError{errs: es} +} + +// nonNilMultiError implements the error interface, and it represents +// multiError with at least one error inside it. +// This type is needed to make sure that nil is returned when no error is combined in multiError for err != nil +// check to work. +type nonNilMultiError struct { + errs multiError +} + +// Error returns a concatenated string of the contained errors. +func (es nonNilMultiError) Error() string { var buf bytes.Buffer - if len(es) > 1 { - fmt.Fprintf(&buf, "%d errors: ", len(es)) + if len(es.errs) > 1 { + fmt.Fprintf(&buf, "%d errors: ", len(es.errs)) } - for i, err := range es { + for i, err := range es.errs { if i != 0 { buf.WriteString("; ") } @@ -41,22 +79,11 @@ func (es MultiError) Error() string { return buf.String() } -// Add adds the error to the error list if it is not nil. -func (es *MultiError) Add(err error) { - if err == nil { - return - } - if merr, ok := err.(MultiError); ok { - *es = append(*es, merr...) - } else { - *es = append(*es, err) - } -} - -// Err returns the error list as an error or nil if it is empty. -func (es MultiError) Err() error { - if len(es) == 0 { - return nil +// CloseAll closes all given closers while recording error in MultiError. 
+func CloseAll(cs []io.Closer) error { + errs := NewMulti() + for _, c := range cs { + errs.Add(c.Close()) } - return es + return errs.Err() } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go index 5cb1238103925..4dbca4f9740f9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go @@ -48,7 +48,7 @@ func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) { b, err := mmap(f, size) if err != nil { - return nil, errors.Wrap(err, "mmap") + return nil, errors.Wrapf(err, "mmap, size %d", size) } return &MmapFile{f: f, b: b}, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 3a577763d64a0..342073dd6c2b9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -28,6 +28,8 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -38,7 +40,6 @@ import ( "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/wal" - "go.uber.org/atomic" ) var ( @@ -52,11 +53,12 @@ var ( // Head handles reads and writes of time series data within a time window. type Head struct { - chunkRange atomic.Int64 - numSeries atomic.Uint64 - minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. - minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block. - lastSeriesID atomic.Uint64 + chunkRange atomic.Int64 + numSeries atomic.Uint64 + minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. + minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block. + lastWALTruncationTime atomic.Int64 + lastSeriesID atomic.Uint64 metrics *headMetrics wal *wal.WAL @@ -293,7 +295,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.Postings // stripeSize sets the number of entries in the hash map, it must be a power of 2. // A larger stripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. // A smaller stripeSize reduces the memory allocated, but can decrease performance with large number of series. 
-func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int64, chkDirRoot string, pool chunkenc.Pool, stripeSize int, seriesCallback SeriesLifecycleCallback) (*Head, error) { +func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int64, chkDirRoot string, chkPool chunkenc.Pool, chkWriteBufferSize, stripeSize int, seriesCallback SeriesLifecycleCallback) (*Head, error) { if l == nil { l = log.NewNopLogger() } @@ -323,14 +325,15 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int h.chunkRange.Store(chunkRange) h.minTime.Store(math.MaxInt64) h.maxTime.Store(math.MinInt64) + h.lastWALTruncationTime.Store(math.MinInt64) h.metrics = newHeadMetrics(h, r) - if pool == nil { - pool = chunkenc.NewPool() + if chkPool == nil { + chkPool = chunkenc.NewPool() } var err error - h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(mmappedChunksDir(chkDirRoot), pool) + h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(mmappedChunksDir(chkDirRoot), chkPool, chkWriteBufferSize) if err != nil { return nil, err } @@ -776,8 +779,20 @@ func (h *Head) removeCorruptedMmappedChunks(err error) map[uint64][]*mmappedChun return mmappedChunks } -// Truncate removes old data before mint from the head. +// Truncate removes old data before mint from the head and WAL. func (h *Head) Truncate(mint int64) (err error) { + initialize := h.MinTime() == math.MaxInt64 + if err := h.truncateMemory(mint); err != nil { + return err + } + if initialize { + return nil + } + return h.truncateWAL(mint) +} + +// truncateMemory removes old data before mint from the head. +func (h *Head) truncateMemory(mint int64) (err error) { defer func() { if err != nil { h.metrics.headTruncateFail.Inc() @@ -813,11 +828,16 @@ func (h *Head) Truncate(mint int64) (err error) { if err := h.chunkDiskMapper.Truncate(mint); err != nil { return errors.Wrap(err, "truncate chunks.HeadReadWriter") } + return nil +} - if h.wal == nil { +// truncateWAL removes old data before mint from the WAL. +func (h *Head) truncateWAL(mint int64) error { + if h.wal == nil || mint <= h.lastWALTruncationTime.Load() { return nil } - start = time.Now() + start := time.Now() + h.lastWALTruncationTime.Store(mint) first, last, err := wal.Segments(h.wal.Dir()) if err != nil { @@ -825,8 +845,7 @@ func (h *Head) Truncate(mint int64) (err error) { } // Start a new segment, so low ingestion volume TSDB don't have more WAL than // needed. - err = h.wal.NextSegment() - if err != nil { + if err := h.wal.NextSegment(); err != nil { return errors.Wrap(err, "next segment") } last-- // Never consider last segment for checkpoint. @@ -950,10 +969,19 @@ func (h *RangeHead) MinTime() int64 { return h.mint } +// MaxTime returns the max time of actual data fetch-able from the head. +// This controls the chunks time range which is closed [b.MinTime, b.MaxTime]. func (h *RangeHead) MaxTime() int64 { return h.maxt } +// BlockMaxTime returns the max time of the potential block created from this head. +// It's different to MaxTime as we need to add +1 millisecond to block maxt because block +// intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. 
+func (h *RangeHead) BlockMaxTime() int64 { + return h.MaxTime() + 1 +} + func (h *RangeHead) NumSeries() uint64 { return h.head.NumSeries() } @@ -1437,12 +1465,11 @@ func (h *Head) Close() error { h.closedMtx.Lock() defer h.closedMtx.Unlock() h.closed = true - var merr tsdb_errors.MultiError - merr.Add(h.chunkDiskMapper.Close()) + errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close()) if h.wal != nil { - merr.Add(h.wal.Close()) + errs.Add(h.wal.Close()) } - return merr.Err() + return errs.Err() } type headChunkReader struct { @@ -2346,7 +2373,8 @@ func (h *Head) Size() int64 { if h.wal != nil { walSize, _ = h.wal.Size() } - return walSize + h.chunkDiskMapper.Size() + cdmSize, _ := h.chunkDiskMapper.Size() + return walSize + cdmSize } func (h *RangeHead) Size() int64 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index d7559b346ac63..70184cbe15974 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -29,6 +29,7 @@ import ( "unsafe" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -102,6 +103,12 @@ func newCRC32() hash.Hash32 { return crc32.New(castagnoliTable) } +type symbolCacheEntry struct { + index uint32 + lastValue string + lastValueIndex uint32 +} + // Writer implements the IndexWriter interface for the standard // serialization format. type Writer struct { @@ -124,10 +131,11 @@ type Writer struct { buf1 encoding.Encbuf buf2 encoding.Encbuf - numSymbols int - symbols *Symbols - symbolFile *fileutil.MmapFile - lastSymbol string + numSymbols int + symbols *Symbols + symbolFile *fileutil.MmapFile + lastSymbol string + symbolCache map[string]symbolCacheEntry labelIndexes []labelIndexHashEntry // Label index offsets. labelNames map[string]uint64 // Label names, and their usage. 
@@ -223,8 +231,9 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, - labelNames: make(map[string]uint64, 1<<8), - crc32: newCRC32(), + symbolCache: make(map[string]symbolCacheEntry, 1<<8), + labelNames: make(map[string]uint64, 1<<8), + crc32: newCRC32(), } if err := iw.writeMeta(); err != nil { return nil, err @@ -429,18 +438,31 @@ func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta w.buf2.PutUvarint(len(lset)) for _, l := range lset { - index, err := w.symbols.ReverseLookup(l.Name) - if err != nil { - return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + var err error + cacheEntry, ok := w.symbolCache[l.Name] + nameIndex := cacheEntry.index + if !ok { + nameIndex, err = w.symbols.ReverseLookup(l.Name) + if err != nil { + return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + } } w.labelNames[l.Name]++ - w.buf2.PutUvarint32(index) + w.buf2.PutUvarint32(nameIndex) - index, err = w.symbols.ReverseLookup(l.Value) - if err != nil { - return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + valueIndex := cacheEntry.lastValueIndex + if !ok || cacheEntry.lastValue != l.Value { + valueIndex, err = w.symbols.ReverseLookup(l.Value) + if err != nil { + return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + } + w.symbolCache[l.Name] = symbolCacheEntry{ + index: nameIndex, + lastValue: l.Value, + lastValueIndex: valueIndex, + } } - w.buf2.PutUvarint32(index) + w.buf2.PutUvarint32(valueIndex) } w.buf2.PutUvarint(len(chunks)) @@ -1074,10 +1096,10 @@ func NewFileReader(path string) (*Reader, error) { } r, err := newReader(realByteSlice(f.Bytes()), f) if err != nil { - var merr tsdb_errors.MultiError - merr.Add(err) - merr.Add(f.Close()) - return nil, merr + return nil, tsdb_errors.NewMulti( + err, + f.Close(), + ).Err() } return r, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 6df8cba9b4ebe..e27c9cf5f97f4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -20,6 +20,7 @@ import ( "unicode/utf8" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -96,12 +97,14 @@ func (q *blockBaseQuerier) Close() error { if q.closed { return errors.New("block querier already closed") } - var merr tsdb_errors.MultiError - merr.Add(q.index.Close()) - merr.Add(q.chunks.Close()) - merr.Add(q.tombstones.Close()) + + errs := tsdb_errors.NewMulti( + q.index.Close(), + q.chunks.Close(), + q.tombstones.Close(), + ) q.closed = true - return merr.Err() + return errs.Err() } type blockQuerier struct { @@ -482,7 +485,7 @@ type populateWithDelGenericSeriesIterator struct { i int err error - bufIter *deletedIterator + bufIter *DeletedIterator intervals tombstones.Intervals currDelIter chunkenc.Iterator @@ -498,7 +501,7 @@ func newPopulateWithDelGenericSeriesIterator( chunks: chunks, chks: chks, i: -1, - bufIter: &deletedIterator{}, + bufIter: &DeletedIterator{}, intervals: intervals, } } @@ -517,10 +520,10 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { return false } - p.bufIter.intervals = p.bufIter.intervals[:0] + p.bufIter.Intervals = p.bufIter.Intervals[:0] for _, interval := range p.intervals { 
if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) { - p.bufIter.intervals = p.bufIter.intervals.Add(interval) + p.bufIter.Intervals = p.bufIter.Intervals.Add(interval) } } @@ -531,14 +534,14 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { // // TODO think how to avoid the typecasting to verify when it is head block. _, isSafeChunk := p.currChkMeta.Chunk.(*safeChunk) - if len(p.bufIter.intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) { + if len(p.bufIter.Intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) { // If there are no overlap with deletion intervals AND it's NOT an "open" head chunk, we can take chunk as it is. p.currDelIter = nil return true } // We don't want full chunk or it's potentially still opened, take just part of it. - p.bufIter.it = p.currChkMeta.Chunk.Iterator(nil) + p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(nil) p.currDelIter = p.bufIter return true } @@ -720,7 +723,8 @@ func (b *blockChunkSeriesSet) At() storage.ChunkSeries { } } -func newMergedStringIter(a index.StringIter, b index.StringIter) index.StringIter { +// NewMergedStringIter returns string iterator that allows to merge symbols on demand and stream result. +func NewMergedStringIter(a index.StringIter, b index.StringIter) index.StringIter { return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()} } @@ -764,35 +768,35 @@ func (m mergedStringIter) Err() error { return m.b.Err() } -// deletedIterator wraps an Iterator and makes sure any deleted metrics are not -// returned. -type deletedIterator struct { - it chunkenc.Iterator - - intervals tombstones.Intervals +// DeletedIterator wraps chunk Iterator and makes sure any deleted metrics are not returned. +type DeletedIterator struct { + // Iter is an Iterator to be wrapped. + Iter chunkenc.Iterator + // Intervals are the deletion intervals. + Intervals tombstones.Intervals } -func (it *deletedIterator) At() (int64, float64) { - return it.it.At() +func (it *DeletedIterator) At() (int64, float64) { + return it.Iter.At() } -func (it *deletedIterator) Seek(t int64) bool { - if it.it.Err() != nil { +func (it *DeletedIterator) Seek(t int64) bool { + if it.Iter.Err() != nil { return false } - if ok := it.it.Seek(t); !ok { + if ok := it.Iter.Seek(t); !ok { return false } // Now double check if the entry falls into a deleted interval. 
ts, _ := it.At() - for _, itv := range it.intervals { + for _, itv := range it.Intervals { if ts < itv.Mint { return true } if ts > itv.Maxt { - it.intervals = it.intervals[1:] + it.Intervals = it.Intervals[1:] continue } @@ -804,12 +808,12 @@ func (it *deletedIterator) Seek(t int64) bool { return true } -func (it *deletedIterator) Next() bool { +func (it *DeletedIterator) Next() bool { Outer: - for it.it.Next() { - ts, _ := it.it.At() + for it.Iter.Next() { + ts, _ := it.Iter.At() - for _, tr := range it.intervals { + for _, tr := range it.Intervals { if tr.InBounds(ts) { continue Outer } @@ -818,14 +822,14 @@ Outer: return true } - it.intervals = it.intervals[1:] + it.Intervals = it.Intervals[1:] } return true } return false } -func (it *deletedIterator) Err() error { return it.it.Err() } +func (it *DeletedIterator) Err() error { return it.Iter.Err() } type nopChunkReader struct { emptyChunk chunkenc.Chunk diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index e7df28766c51f..408882e832437 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -19,6 +19,7 @@ import ( "sort" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/tombstones" diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 02114cd4f67bd..cc777546e5c01 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -23,6 +23,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -82,18 +83,18 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { return errors.Wrapf(err, "copy content of index to index.repaired for block dir: %v", d) } - var merr tsdb_errors.MultiError - // Set the 5th byte to 2 to indicate the correct file format version. 
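Editor sketch (not part of the patch): deletedIterator is exported above as tsdb.DeletedIterator with public Iter and Intervals fields, so it can be constructed from outside the tsdb package. A small hedged example of wrapping a chunk iterator with tombstone intervals; the helper and its inputs are invented for illustration.

    package sketch

    import (
        "github.com/prometheus/prometheus/tsdb"
        "github.com/prometheus/prometheus/tsdb/chunkenc"
        "github.com/prometheus/prometheus/tsdb/tombstones"
    )

    // collectUndeleted returns the samples of chk that fall outside the deleted ranges.
    func collectUndeleted(chk chunkenc.Chunk, deleted tombstones.Intervals) (ts []int64, vs []float64, err error) {
        it := &tsdb.DeletedIterator{
            Iter:      chk.Iterator(nil),
            Intervals: deleted, // e.g. tombstones.Intervals{{Mint: 1000, Maxt: 2000}}
        }
        for it.Next() {
            t, v := it.At()
            ts = append(ts, t)
            vs = append(vs, v)
        }
        return ts, vs, it.Err()
    }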
if _, err := repl.WriteAt([]byte{2}, 4); err != nil { - merr.Add(errors.Wrap(err, "rewrite of index.repaired")) - merr.Add(errors.Wrap(repl.Close(), "close")) - return errors.Wrapf(merr.Err(), "block dir: %v", d) + return tsdb_errors.NewMulti( + errors.Wrapf(err, "rewrite of index.repaired for block dir: %v", d), + errors.Wrap(repl.Close(), "close"), + ).Err() } if err := repl.Sync(); err != nil { - merr.Add(errors.Wrap(err, "sync of index.repaired")) - merr.Add(errors.Wrap(repl.Close(), "close")) - return errors.Wrapf(merr.Err(), "block dir: %v", d) + return tsdb_errors.NewMulti( + errors.Wrapf(err, "sync of index.repaired for block dir: %v", d), + errors.Wrap(repl.Close(), "close"), + ).Err() } if err := repl.Close(); err != nil { return errors.Wrapf(repl.Close(), "close repaired index for block dir: %v", d) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go index d34a75af4d499..aa74a7d1e3561 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go @@ -27,6 +27,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -125,10 +126,8 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { } size += n - var merr tsdb_errors.MultiError - if merr.Add(f.Sync()); merr.Err() != nil { - merr.Add(f.Close()) - return 0, merr.Err() + if err := f.Sync(); err != nil { + return 0, tsdb_errors.NewMulti(err, f.Close()).Err() } if err = f.Close(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go index 15f129d318dff..be2c63f9f4329 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go @@ -19,6 +19,7 @@ import ( "path/filepath" "github.com/go-kit/kit/log" + "github.com/prometheus/prometheus/storage" ) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index 1e503a1481b51..7856ac14c6576 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -31,6 +31,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb/encoding" "github.com/prometheus/prometheus/tsdb/fileutil" diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go index 33e2e58cedbf3..a264e1e958ebf 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go @@ -28,6 +28,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/record" @@ -67,14 +68,12 @@ func DeleteCheckpoints(dir string, maxIndex int) error { return err } - var errs tsdb_errors.MultiError + errs := tsdb_errors.NewMulti() for _, 
checkpoint := range checkpoints { if checkpoint.index >= maxIndex { break } - if err := os.RemoveAll(filepath.Join(dir, checkpoint.name)); err != nil { - errs.Add(err) - } + errs.Add(os.RemoveAll(filepath.Join(dir, checkpoint.name))) } return errs.Err() } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go index c5401ddf1e8ea..3aa87b2c07068 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go @@ -33,6 +33,7 @@ import ( "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -73,9 +74,18 @@ func (p *page) reset() { p.flushed = 0 } +// SegmentFile represents the underlying file used to store a segment. +type SegmentFile interface { + Stat() (os.FileInfo, error) + Sync() error + io.Writer + io.Reader + io.Closer +} + // Segment represents a segment file. type Segment struct { - *os.File + SegmentFile dir string i int } @@ -129,7 +139,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { return nil, errors.Wrap(err, "zero-pad torn page") } } - return &Segment{File: f, i: k, dir: dir}, nil + return &Segment{SegmentFile: f, i: k, dir: dir}, nil } // CreateSegment creates a new segment k in dir. @@ -138,7 +148,7 @@ func CreateSegment(dir string, k int) (*Segment, error) { if err != nil { return nil, err } - return &Segment{File: f, i: k, dir: dir}, nil + return &Segment{SegmentFile: f, i: k, dir: dir}, nil } // OpenReadSegment opens the segment with the given filename. @@ -151,7 +161,7 @@ func OpenReadSegment(fn string) (*Segment, error) { if err != nil { return nil, err } - return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil + return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil } // WAL is a write ahead log that stores records in segment files. @@ -516,8 +526,10 @@ func (w *WAL) flushPage(clear bool) error { if clear { p.alloc = pageSize // Write till end of page. } + n, err := w.segment.Write(p.buf[p.flushed:p.alloc]) if err != nil { + p.flushed += n return err } p.flushed += n @@ -663,6 +675,9 @@ func (w *WAL) log(rec []byte, final bool) error { if w.page.full() { if err := w.flushPage(true); err != nil { + // TODO When the flushing fails at this point and the record has not been + // fully written to the buffer, we end up with a corrupted WAL because some part of the + // record have been written to the buffer, while the rest of the record will be discarded. 
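Editor sketch (not part of the patch): Segment now embeds the new SegmentFile interface instead of *os.File, so the segment backing can be swapped out, for example in tests. A minimal hedged example of a type satisfying the interface; it is purely illustrative and not a real in-tree helper.

    package sketch

    import (
        "bytes"
        "errors"
        "os"

        "github.com/prometheus/prometheus/tsdb/wal"
    )

    // memSegmentFile is an in-memory stand-in for a segment file.
    // The embedded bytes.Buffer provides Read and Write.
    type memSegmentFile struct {
        bytes.Buffer
    }

    func (m *memSegmentFile) Stat() (os.FileInfo, error) { return nil, errors.New("not backed by a real file") }
    func (m *memSegmentFile) Sync() error                { return nil }
    func (m *memSegmentFile) Close() error               { return nil }

    // Compile-time check that the sketch satisfies wal.SegmentFile.
    var _ wal.SegmentFile = (*memSegmentFile)(nil)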
return err } } @@ -704,7 +719,7 @@ func (w *WAL) Truncate(i int) (err error) { func (w *WAL) fsync(f *Segment) error { start := time.Now() - err := f.File.Sync() + err := f.Sync() w.metrics.fsyncDuration.Observe(time.Since(start).Seconds()) return err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go index 1fb78005f8c5f..8670567f132b2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go @@ -29,6 +29,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/tsdb/record" ) diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go index 4f0f08fe8f346..1c87e35de5d7b 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go @@ -21,6 +21,8 @@ import ( "path/filepath" "strconv" "testing" + + "github.com/stretchr/testify/require" ) const ( @@ -137,32 +139,32 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) { func DirHash(t *testing.T, path string) []byte { hash := sha256.New() err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { - Ok(t, err) + require.NoError(t, err) if info.IsDir() { return nil } f, err := os.Open(path) - Ok(t, err) + require.NoError(t, err) defer f.Close() _, err = io.Copy(hash, f) - Ok(t, err) + require.NoError(t, err) _, err = io.WriteString(hash, strconv.Itoa(int(info.Size()))) - Ok(t, err) + require.NoError(t, err) _, err = io.WriteString(hash, info.Name()) - Ok(t, err) + require.NoError(t, err) modTime, err := info.ModTime().GobEncode() - Ok(t, err) + require.NoError(t, err) _, err = io.WriteString(hash, string(modTime)) - Ok(t, err) + require.NoError(t, err) return nil }) - Ok(t, err) + require.NoError(t, err) return hash.Sum(nil) } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go index 1645f80d5c358..8ec50cb00f421 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go @@ -23,140 +23,11 @@ package testutil import ( - "fmt" - "reflect" "testing" - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" "go.uber.org/goleak" ) -// This package is imported by non-test code and therefore cannot import the -// testing package, which has side effects such as adding flags. Hence we use an -// interface to testing.{T,B}. -type TB interface { - Helper() - Fatalf(string, ...interface{}) -} - -// Assert fails the test if the condition is false. -func Assert(tb TB, condition bool, format string, a ...interface{}) { - tb.Helper() - if !condition { - tb.Fatalf("\033[31m"+format+"\033[39m\n", a...) - } -} - -// Ok fails the test if an err is not nil. -func Ok(tb TB, err error) { - tb.Helper() - if err != nil { - tb.Fatalf("\033[31munexpected error: %v\033[39m\n", err) - } -} - -// NotOk fails the test if an err is nil. -func NotOk(tb TB, err error, a ...interface{}) { - tb.Helper() - if err == nil { - if len(a) != 0 { - format := a[0].(string) - tb.Fatalf("\033[31m"+format+": expected error, got none\033[39m", a[1:]...) 
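Editor note (not part of the patch): the bespoke assertion helpers removed in this and the following hunk are superseded by testify, which the updated directory.go above already uses. A rough mapping; only Ok is shown being migrated in this patch, the other rows are an assumption based on testify's standard API.

    // testutil.Ok(t, err)            ->  require.NoError(t, err)
    // testutil.NotOk(t, err)         ->  require.Error(t, err)
    // testutil.Equals(t, exp, got)   ->  require.Equal(t, exp, got)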
- } - tb.Fatalf("\033[31mexpected error, got none\033[39m") - } -} - -// Equals fails the test if exp is not equal to act. -func Equals(tb TB, exp, act interface{}, msgAndArgs ...interface{}) { - tb.Helper() - if !reflect.DeepEqual(exp, act) { - tb.Fatalf("\033[31m%s\n\nexp: %#v\n\ngot: %#v%s\033[39m\n", formatMessage(msgAndArgs), exp, act, diff(exp, act)) - } -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - return "\n\nDiff:\n" + diff -} - -// ErrorEqual compares Go errors for equality. -func ErrorEqual(tb TB, left, right error, msgAndArgs ...interface{}) { - tb.Helper() - if left == right { - return - } - - if left != nil && right != nil { - Equals(tb, left.Error(), right.Error(), msgAndArgs...) - return - } - - tb.Fatalf("\033[31m%s\n\nexp: %#v\n\ngot: %#v\033[39m\n", formatMessage(msgAndArgs), left, right) -} - -func formatMessage(msgAndArgs []interface{}) string { - if len(msgAndArgs) == 0 { - return "" - } - - if msg, ok := msgAndArgs[0].(string); ok { - return fmt.Sprintf("\n\nmsg: "+msg, msgAndArgs[1:]...) - } - return "" -} - // TolerantVerifyLeak verifies go leaks but excludes the go routines that are // launched as side effects of some of our dependencies. 
func TolerantVerifyLeak(m *testing.M) { diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 284278e5277e9..45669f69cb734 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -37,6 +37,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/route" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/gate" "github.com/prometheus/prometheus/pkg/labels" @@ -1477,9 +1478,9 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { var ( snapdir = filepath.Join(api.dbDir, "snapshots") - name = fmt.Sprintf("%s-%x", + name = fmt.Sprintf("%s-%016x", time.Now().UTC().Format("20060102T150405Z0700"), - rand.Int()) + rand.Int63()) dir = filepath.Join(snapdir, name) ) if err := os.MkdirAll(dir, 0777); err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 550ebc351f9e5..33993bd446bf0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -14,6 +14,7 @@ import ( "os" "path" "path/filepath" + "sort" "strings" "time" @@ -66,6 +67,7 @@ func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id // It makes sure cleanup is done on error to avoid partial block uploads. // It also verifies basic features of Thanos block. // TODO(bplotka): Ensure bucket operations have reasonable backoff retries. +// NOTE: Upload updates `meta.Thanos.File` section. func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string) error { df, err := os.Stat(bdir) if err != nil { @@ -81,7 +83,7 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return errors.Wrap(err, "not a block dir") } - meta, err := metadata.Read(bdir) + meta, err := metadata.ReadFromDir(bdir) if err != nil { // No meta or broken meta file. return errors.Wrap(err, "read meta") @@ -91,8 +93,18 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return errors.New("empty external labels are not allowed for Thanos block.") } - if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(DebugMetas, fmt.Sprintf("%s.json", id))); err != nil { - return errors.Wrap(err, "upload meta file to debug dir") + meta.Thanos.Files, err = gatherFileStats(bdir) + if err != nil { + return errors.Wrap(err, "gather meta file stats") + } + + metaEncoded := bytes.Buffer{} + if err := meta.Write(&metaEncoded); err != nil { + return errors.Wrap(err, "encode meta file") + } + + if err := bkt.Upload(ctx, path.Join(DebugMetas, fmt.Sprintf("%s.json", id)), bytes.NewReader(metaEncoded.Bytes())); err != nil { + return cleanUp(logger, bkt, id, errors.Wrap(err, "upload debug meta file")) } if err := objstore.UploadDir(ctx, logger, bkt, path.Join(bdir, ChunksDirname), path.Join(id.String(), ChunksDirname)); err != nil { @@ -103,9 +115,8 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return cleanUp(logger, bkt, id, errors.Wrap(err, "upload index")) } - // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file - // to be pending uploads. 
- if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(id.String(), MetaFilename)); err != nil { + // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file to be pending uploads. + if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), &metaEncoded); err != nil { return cleanUp(logger, bkt, id, errors.Wrap(err, "upload meta file")) } @@ -122,7 +133,7 @@ func cleanUp(logger log.Logger, bkt objstore.Bucket, id ulid.ULID, err error) er } // MarkForDeletion creates a file which stores information about when the block was marked for deletion. -func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, markedForDeletion prometheus.Counter) error { +func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, details string, markedForDeletion prometheus.Counter) error { deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) deletionMarkExists, err := bkt.Exists(ctx, deletionMarkFile) if err != nil { @@ -137,6 +148,7 @@ func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket ID: id, DeletionTime: time.Now().Unix(), Version: metadata.DeletionMarkVersion1, + Details: details, }) if err != nil { return errors.Wrap(err, "json encode deletion mark") @@ -226,9 +238,7 @@ func IsBlockDir(path string) (id ulid.ULID, ok bool) { // GetSegmentFiles returns list of segment files for given block. Paths are relative to the chunks directory. // In case of errors, nil is returned. func GetSegmentFiles(blockDir string) []string { - chunksDir := filepath.Join(blockDir, ChunksDirname) - - files, err := ioutil.ReadDir(chunksDir) + files, err := ioutil.ReadDir(filepath.Join(blockDir, ChunksDirname)) if err != nil { return nil } @@ -240,3 +250,70 @@ func GetSegmentFiles(blockDir string) []string { } return result } + +// TODO(bwplotka): Gather stats when dirctly uploading files. +func gatherFileStats(blockDir string) (res []metadata.File, _ error) { + files, err := ioutil.ReadDir(filepath.Join(blockDir, ChunksDirname)) + if err != nil { + return nil, errors.Wrapf(err, "read dir %v", filepath.Join(blockDir, ChunksDirname)) + } + for _, f := range files { + res = append(res, metadata.File{ + RelPath: filepath.Join(ChunksDirname, f.Name()), + SizeBytes: f.Size(), + }) + } + + indexFile, err := os.Stat(filepath.Join(blockDir, IndexFilename)) + if err != nil { + return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, IndexFilename)) + } + res = append(res, metadata.File{ + RelPath: indexFile.Name(), + SizeBytes: indexFile.Size(), + }) + + metaFile, err := os.Stat(filepath.Join(blockDir, MetaFilename)) + if err != nil { + return nil, errors.Wrapf(err, "stat %v", filepath.Join(blockDir, MetaFilename)) + } + res = append(res, metadata.File{RelPath: metaFile.Name()}) + + sort.Slice(res, func(i, j int) bool { + return strings.Compare(res[i].RelPath, res[j].RelPath) < 0 + }) + // TODO(bwplotka): Add optional files like tombstones? + return res, err +} + +// MarkForNoCompact creates a file which marks block to be not compacted. 
+func MarkForNoCompact(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID, reason metadata.NoCompactReason, details string, markedForNoCompact prometheus.Counter) error { + m := path.Join(id.String(), metadata.NoCompactMarkFilename) + noCompactMarkExists, err := bkt.Exists(ctx, m) + if err != nil { + return errors.Wrapf(err, "check exists %s in bucket", m) + } + if noCompactMarkExists { + level.Warn(logger).Log("msg", "requested to mark for no compaction, but file already exists; this should not happen; investigate", "err", errors.Errorf("file %s already exists in bucket", m)) + return nil + } + + noCompactMark, err := json.Marshal(metadata.NoCompactMark{ + ID: id, + Version: metadata.NoCompactMarkVersion1, + + NoCompactTime: time.Now().Unix(), + Reason: reason, + Details: details, + }) + if err != nil { + return errors.Wrap(err, "json encode no compact mark") + } + + if err := bkt.Upload(ctx, m, bytes.NewBuffer(noCompactMark)); err != nil { + return errors.Wrapf(err, "upload file %s to bucket", m) + } + markedForNoCompact.Inc() + level.Info(logger).Log("msg", "block has been marked for no compaction", "block", id) + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index bd83f6e2fbc27..3a05e97eb34b9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -24,14 +24,15 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/tsdb" - tsdberrors "github.com/prometheus/prometheus/tsdb/errors" + "golang.org/x/sync/errgroup" + "gopkg.in/yaml.v2" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" - "golang.org/x/sync/errgroup" - "gopkg.in/yaml.v2" ) type fetcherMetrics struct { @@ -70,6 +71,9 @@ const ( // but don't have a replacement block yet. markedForDeletionMeta = "marked-for-deletion" + // MarkedForNoCompactionMeta is label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric. + MarkedForNoCompactionMeta = "marked-for-no-compact" + // Modified label values. replicaRemovedMeta = "replica-label-removed" ) @@ -110,6 +114,7 @@ func newFetcherMetrics(reg prometheus.Registerer) *fetcherMetrics { []string{timeExcludedMeta}, []string{duplicateMeta}, []string{markedForDeletionMeta}, + []string{MarkedForNoCompactionMeta}, ) m.modified = extprom.NewTxGaugeVec( reg, @@ -223,7 +228,7 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met // Best effort load from local dir. 
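Editor sketch (not part of the patch): MarkForNoCompact above mirrors MarkForDeletion but writes a no-compact marker with a reason and free-form details. A hedged example of a caller; the bucket and counter wiring, metric name and reason string are illustrative assumptions, and real callers would normally use a predefined reason constant from the metadata package.

    package sketch

    import (
        "context"

        "github.com/go-kit/kit/log"
        "github.com/oklog/ulid"
        "github.com/prometheus/client_golang/prometheus"
        "github.com/thanos-io/thanos/pkg/block"
        "github.com/thanos-io/thanos/pkg/block/metadata"
        "github.com/thanos-io/thanos/pkg/objstore"
    )

    func excludeFromCompaction(ctx context.Context, bkt objstore.Bucket, id ulid.ULID) error {
        marked := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "sketch_blocks_marked_for_no_compaction_total", // illustrative metric name
        })
        // The reason is passed as an ad-hoc value here purely for the example.
        return block.MarkForNoCompact(ctx, log.NewNopLogger(), bkt, id,
            metadata.NoCompactReason("manual"), "excluded by operator", marked)
    }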
if f.cacheDir != "" { - m, err := metadata.Read(cachedBlockDir) + m, err := metadata.ReadFromDir(cachedBlockDir) if err == nil { return m, nil } @@ -257,7 +262,7 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met return nil, errors.Wrapf(ErrorSyncMetaCorrupted, "meta.json %v unmarshal: %v", metaFile, err) } - if m.Version != metadata.MetaVersion1 { + if m.Version != metadata.TSDBVersion1 { return nil, errors.Errorf("unexpected meta file: %s version: %d", metaFile, m.Version) } @@ -267,7 +272,7 @@ func (f *BaseFetcher) loadMeta(ctx context.Context, id ulid.ULID) (*metadata.Met level.Warn(f.logger).Log("msg", "best effort mkdir of the meta.json block dir failed; ignoring", "dir", cachedBlockDir, "err", err) } - if err := metadata.Write(f.logger, cachedBlockDir, m); err != nil { + if err := m.WriteToDir(f.logger, cachedBlockDir); err != nil { level.Warn(f.logger).Log("msg", "best effort save of the meta.json to local dir failed; ignoring", "dir", cachedBlockDir, "err", err) } } @@ -278,7 +283,7 @@ type response struct { metas map[ulid.ULID]*metadata.Meta partial map[ulid.ULID]error // If metaErr > 0 it means incomplete view, so some metas, failed to be loaded. - metaErrs tsdberrors.MultiError + metaErrs errutil.MultiError noMetas float64 corruptedMetas float64 @@ -757,16 +762,18 @@ func (f *ConsistencyDelayMetaFilter) Filter(_ context.Context, metas map[ulid.UL type IgnoreDeletionMarkFilter struct { logger log.Logger delay time.Duration + concurrency int bkt objstore.InstrumentedBucketReader deletionMarkMap map[ulid.ULID]*metadata.DeletionMark } // NewIgnoreDeletionMarkFilter creates IgnoreDeletionMarkFilter. -func NewIgnoreDeletionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader, delay time.Duration) *IgnoreDeletionMarkFilter { +func NewIgnoreDeletionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader, delay time.Duration, concurrency int) *IgnoreDeletionMarkFilter { return &IgnoreDeletionMarkFilter{ - logger: logger, - bkt: bkt, - delay: delay, + logger: logger, + bkt: bkt, + delay: delay, + concurrency: concurrency, } } @@ -780,34 +787,82 @@ func (f *IgnoreDeletionMarkFilter) DeletionMarkBlocks() map[ulid.ULID]*metadata. func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec) error { f.deletionMarkMap = make(map[ulid.ULID]*metadata.DeletionMark) + // Make a copy of block IDs to check, in order to avoid concurrency issues + // between the scheduler and workers. 
+ blockIDs := make([]ulid.ULID, 0, len(metas)) for id := range metas { - deletionMark, err := metadata.ReadDeletionMark(ctx, f.bkt, f.logger, id.String()) - if err == metadata.ErrorDeletionMarkNotFound { - continue - } - if errors.Cause(err) == metadata.ErrorUnmarshalDeletionMark { - level.Warn(f.logger).Log("msg", "found partial deletion-mark.json; if we will see it happening often for the same block, consider manually deleting deletion-mark.json from the object storage", "block", id, "err", err) - continue - } - if err != nil { - return err - } - f.deletionMarkMap[id] = deletionMark - if time.Since(time.Unix(deletionMark.DeletionTime, 0)).Seconds() > f.delay.Seconds() { - synced.WithLabelValues(markedForDeletionMeta).Inc() - delete(metas, id) + blockIDs = append(blockIDs, id) + } + + var ( + eg errgroup.Group + ch = make(chan ulid.ULID, f.concurrency) + mtx sync.Mutex + ) + + for i := 0; i < f.concurrency; i++ { + eg.Go(func() error { + for id := range ch { + m := &metadata.DeletionMark{} + if err := metadata.ReadMarker(ctx, f.logger, f.bkt, id.String(), m); err != nil { + if errors.Cause(err) == metadata.ErrorMarkerNotFound { + continue + } + if errors.Cause(err) == metadata.ErrorUnmarshalMarker { + level.Warn(f.logger).Log("msg", "found partial deletion-mark.json; if we will see it happening often for the same block, consider manually deleting deletion-mark.json from the object storage", "block", id, "err", err) + continue + } + return err + } + + // Keep track of the blocks marked for deletion and filter them out if their + // deletion time is greater than the configured delay. + mtx.Lock() + f.deletionMarkMap[id] = m + if time.Since(time.Unix(m.DeletionTime, 0)).Seconds() > f.delay.Seconds() { + synced.WithLabelValues(markedForDeletionMeta).Inc() + delete(metas, id) + } + mtx.Unlock() + } + + return nil + }) + } + + // Workers scheduled, distribute blocks. + eg.Go(func() error { + defer close(ch) + + for _, id := range blockIDs { + select { + case ch <- id: + // Nothing to do. + case <-ctx.Done(): + return ctx.Err() + } } + + return nil + }) + + if err := eg.Wait(); err != nil { + return errors.Wrap(err, "filter blocks marked for deletion") } + return nil } +var ( + SelectorSupportedRelabelActions = map[relabel.Action]struct{}{relabel.Keep: {}, relabel.Drop: {}, relabel.HashMod: {}} +) + // ParseRelabelConfig parses relabel configuration. -func ParseRelabelConfig(contentYaml []byte) ([]*relabel.Config, error) { +func ParseRelabelConfig(contentYaml []byte, supportedActions map[relabel.Action]struct{}) ([]*relabel.Config, error) { var relabelConfig []*relabel.Config if err := yaml.Unmarshal(contentYaml, &relabelConfig); err != nil { return nil, errors.Wrap(err, "parsing relabel configuration") } - supportedActions := map[relabel.Action]struct{}{relabel.Keep: {}, relabel.Drop: {}, relabel.HashMod: {}} for _, cfg := range relabelConfig { if _, ok := supportedActions[cfg.Action]; !ok { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/index.go b/vendor/github.com/thanos-io/thanos/pkg/block/index.go index 41ac09dad7e7b..00c8f0ba3a509 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/index.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/index.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "hash/crc32" + "math" "math/rand" "path/filepath" "sort" @@ -29,7 +30,7 @@ import ( // VerifyIndex does a full run over a block index and verifies that it fulfills the order invariants. 
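Editor sketch (not part of the patch): the rewritten IgnoreDeletionMarkFilter.Filter above fans block IDs out to a fixed number of errgroup workers over a channel, guarding the shared deletion-mark map with a mutex. The same shape in isolation; the handle callback and counter are placeholders, and, as in the filter itself, a failing worker relies on the remaining workers or context cancellation to keep the producer from blocking.

    package sketch

    import (
        "context"
        "sync"

        "golang.org/x/sync/errgroup"
    )

    // forEachConcurrently runs handle over ids with the given worker count.
    func forEachConcurrently(ctx context.Context, ids []string, concurrency int, handle func(string) error) error {
        var (
            eg   errgroup.Group
            ch   = make(chan string, concurrency)
            mtx  sync.Mutex
            done int
        )

        // Workers drain the channel until the producer closes it.
        for i := 0; i < concurrency; i++ {
            eg.Go(func() error {
                for id := range ch {
                    if err := handle(id); err != nil {
                        return err
                    }
                    mtx.Lock()
                    done++ // shared bookkeeping, like the filter's deletion-mark map
                    mtx.Unlock()
                }
                return nil
            })
        }

        // Producer: distribute IDs, bail out if the caller's context is canceled.
        eg.Go(func() error {
            defer close(ch)
            for _, id := range ids {
                select {
                case ch <- id:
                case <-ctx.Done():
                    return ctx.Err()
                }
            }
            return nil
        })

        return eg.Wait()
    }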
func VerifyIndex(logger log.Logger, fn string, minTime int64, maxTime int64) error { - stats, err := GatherIndexIssueStats(logger, fn, minTime, maxTime) + stats, err := GatherIndexHealthStats(logger, fn, minTime, maxTime) if err != nil { return err } @@ -37,9 +38,9 @@ func VerifyIndex(logger log.Logger, fn string, minTime int64, maxTime int64) err return stats.AnyErr() } -type Stats struct { +type HealthStats struct { // TotalSeries represents total number of series in block. - TotalSeries int + TotalSeries int64 // OutOfOrderSeries represents number of series that have out of order chunks. OutOfOrderSeries int @@ -60,12 +61,41 @@ type Stats struct { // OutOfOrderLabels represents the number of postings that contained out // of order labels, a bug present in Prometheus 2.8.0 and below. OutOfOrderLabels int + + // Debug Statistics. + SeriesMinLifeDuration time.Duration + SeriesAvgLifeDuration time.Duration + SeriesMaxLifeDuration time.Duration + + SeriesMinLifeDurationWithoutSingleSampleSeries time.Duration + SeriesAvgLifeDurationWithoutSingleSampleSeries time.Duration + SeriesMaxLifeDurationWithoutSingleSampleSeries time.Duration + + SeriesMinChunks int64 + SeriesAvgChunks int64 + SeriesMaxChunks int64 + + TotalChunks int64 + + ChunkMinDuration time.Duration + ChunkAvgDuration time.Duration + ChunkMaxDuration time.Duration + + ChunkMinSize int64 + ChunkAvgSize int64 + ChunkMaxSize int64 + + SingleSampleSeries int64 + SingleSampleChunks int64 + + LabelNamesCount int64 + MetricLabelValuesCount int64 } -// PrometheusIssue5372Err returns an error if the Stats object indicates +// PrometheusIssue5372Err returns an error if the HealthStats object indicates // postings with out of order labels. This is corrected by Prometheus Issue // #5372 and affects Prometheus versions 2.8.0 and below. -func (i Stats) PrometheusIssue5372Err() error { +func (i HealthStats) PrometheusIssue5372Err() error { if i.OutOfOrderLabels > 0 { return errors.Errorf("index contains %d postings with out of order labels", i.OutOfOrderLabels) @@ -74,7 +104,7 @@ func (i Stats) PrometheusIssue5372Err() error { } // Issue347OutsideChunksErr returns error if stats indicates issue347 block issue, that is repaired explicitly before compaction (on plan block). -func (i Stats) Issue347OutsideChunksErr() error { +func (i HealthStats) Issue347OutsideChunksErr() error { if i.Issue347OutsideChunks > 0 { return errors.Errorf("found %d chunks outside the block time range introduced by https://github.com/prometheus/tsdb/issues/347", i.Issue347OutsideChunks) } @@ -82,7 +112,7 @@ func (i Stats) Issue347OutsideChunksErr() error { } // CriticalErr returns error if stats indicates critical block issue, that might solved only by manual repair procedure. -func (i Stats) CriticalErr() error { +func (i HealthStats) CriticalErr() error { var errMsg []string if i.OutOfOrderSeries > 0 { @@ -113,7 +143,7 @@ func (i Stats) CriticalErr() error { } // AnyErr returns error if stats indicates any block issue. 
-func (i Stats) AnyErr() error { +func (i HealthStats) AnyErr() error { var errMsg []string if err := i.CriticalErr(); err != nil { @@ -135,11 +165,44 @@ func (i Stats) AnyErr() error { return nil } -// GatherIndexIssueStats returns useful counters as well as outsider chunks (chunks outside of block time range) that +type minMaxSumInt64 struct { + sum int64 + min int64 + max int64 + + cnt int64 +} + +func newMinMaxSumInt64() minMaxSumInt64 { + return minMaxSumInt64{ + min: math.MaxInt64, + max: math.MinInt64, + } +} + +func (n *minMaxSumInt64) Add(v int64) { + n.cnt++ + n.sum += v + if n.min > v { + n.min = v + } + if n.max < v { + n.max = v + } +} + +func (n *minMaxSumInt64) Avg() int64 { + if n.cnt == 0 { + return 0 + } + return n.sum / n.cnt +} + +// GatherIndexHealthStats returns useful counters as well as outsider chunks (chunks outside of block time range) that // helps to assess index health. // It considers https://github.com/prometheus/tsdb/issues/347 as something that Thanos can handle. -// See Stats.Issue347OutsideChunks for details. -func GatherIndexIssueStats(logger log.Logger, fn string, minTime int64, maxTime int64) (stats Stats, err error) { +// See HealthStats.Issue347OutsideChunks for details. +func GatherIndexHealthStats(logger log.Logger, fn string, minTime int64, maxTime int64) (stats HealthStats, err error) { r, err := index.NewFileReader(fn) if err != nil { return stats, errors.Wrap(err, "open index file") @@ -154,8 +217,26 @@ func GatherIndexIssueStats(logger log.Logger, fn string, minTime int64, maxTime lastLset labels.Labels lset labels.Labels chks []chunks.Meta + + seriesLifeDuration = newMinMaxSumInt64() + seriesLifeDurationWithoutSingleSampleSeries = newMinMaxSumInt64() + seriesChunks = newMinMaxSumInt64() + chunkDuration = newMinMaxSumInt64() + chunkSize = newMinMaxSumInt64() ) + lnames, err := r.LabelNames() + if err != nil { + return stats, errors.Wrap(err, "label names") + } + stats.LabelNamesCount = int64(len(lnames)) + + lvals, err := r.LabelValues("__name__") + if err != nil { + return stats, errors.Wrap(err, "metric label values") + } + stats.MetricLabelValuesCount = int64(len(lvals)) + // Per series. for p.Next() { lastLset = append(lastLset[:0], lset...) @@ -189,8 +270,23 @@ func GatherIndexIssueStats(logger log.Logger, fn string, minTime int64, maxTime } ooo := 0 + seriesLifeTimeMs := int64(0) // Per chunk in series. for i, c := range chks { + stats.TotalChunks++ + + chkDur := c.MaxTime - c.MinTime + seriesLifeTimeMs += chkDur + chunkDuration.Add(chkDur) + if chkDur == 0 { + stats.SingleSampleChunks++ + } + + // Approximate size. + if i < len(chks)-2 { + chunkSize.Add(int64(chks[i+1].Ref - c.Ref)) + } + // Chunk vs the block ranges. 
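// Stand-alone demo (stdlib only, made-up sample values) of the min/max/avg bookkeeping
// the rewritten GatherIndexHealthStats performs via the minMaxSumInt64 helper above:
// accumulate per-chunk millisecond values, then convert the results into the
// time.Duration debug fields HealthStats now exposes.
package main

import (
	"fmt"
	"math"
	"time"
)

type minMaxSum struct{ sum, min, max, cnt int64 }

func newMinMaxSum() minMaxSum { return minMaxSum{min: math.MaxInt64, max: math.MinInt64} }

func (a *minMaxSum) add(v int64) {
	a.cnt++
	a.sum += v
	if v < a.min {
		a.min = v
	}
	if v > a.max {
		a.max = v
	}
}

func (a *minMaxSum) avg() int64 {
	if a.cnt == 0 {
		return 0
	}
	return a.sum / a.cnt
}

func main() {
	chunkDur := newMinMaxSum()
	for _, ms := range []int64{120_000, 90_000, 240_000} { // chunk durations in milliseconds
		chunkDur.add(ms)
	}
	fmt.Printf("chunk duration min=%s avg=%s max=%s\n",
		time.Duration(chunkDur.min)*time.Millisecond,
		time.Duration(chunkDur.avg())*time.Millisecond,
		time.Duration(chunkDur.max)*time.Millisecond)
}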
if c.MinTime < minTime || c.MaxTime > maxTime { stats.OutsideChunks++ @@ -226,11 +322,39 @@ func GatherIndexIssueStats(logger log.Logger, fn string, minTime int64, maxTime stats.OutOfOrderSeries++ stats.OutOfOrderChunks += ooo } + + seriesChunks.Add(int64(len(chks))) + seriesLifeDuration.Add(seriesLifeTimeMs) + + if seriesLifeTimeMs == 0 { + stats.SingleSampleSeries++ + } else { + seriesLifeDurationWithoutSingleSampleSeries.Add(seriesLifeTimeMs) + } } if p.Err() != nil { return stats, errors.Wrap(err, "walk postings") } + stats.SeriesMaxLifeDuration = time.Duration(seriesLifeDuration.max) * time.Millisecond + stats.SeriesAvgLifeDuration = time.Duration(seriesLifeDuration.Avg()) * time.Millisecond + stats.SeriesMinLifeDuration = time.Duration(seriesLifeDuration.min) * time.Millisecond + + stats.SeriesMaxLifeDurationWithoutSingleSampleSeries = time.Duration(seriesLifeDurationWithoutSingleSampleSeries.max) * time.Millisecond + stats.SeriesAvgLifeDurationWithoutSingleSampleSeries = time.Duration(seriesLifeDurationWithoutSingleSampleSeries.Avg()) * time.Millisecond + stats.SeriesMinLifeDurationWithoutSingleSampleSeries = time.Duration(seriesLifeDurationWithoutSingleSampleSeries.min) * time.Millisecond + + stats.SeriesMaxChunks = seriesChunks.max + stats.SeriesAvgChunks = seriesChunks.Avg() + stats.SeriesMinChunks = seriesChunks.min + + stats.ChunkMaxSize = chunkSize.max + stats.ChunkAvgSize = chunkSize.Avg() + stats.ChunkMinSize = chunkSize.min + + stats.ChunkMaxDuration = time.Duration(chunkDuration.max) * time.Millisecond + stats.ChunkAvgDuration = time.Duration(chunkDuration.Avg()) * time.Millisecond + stats.ChunkMinDuration = time.Duration(chunkDuration.min) * time.Millisecond return stats, nil } @@ -252,7 +376,7 @@ func Repair(logger log.Logger, dir string, id ulid.ULID, source metadata.SourceT entropy := rand.New(rand.NewSource(time.Now().UnixNano())) resid = ulid.MustNew(ulid.Now(), entropy) - meta, err := metadata.Read(bdir) + meta, err := metadata.ReadFromDir(bdir) if err != nil { return resid, errors.Wrap(err, "read meta file") } @@ -303,12 +427,12 @@ func Repair(logger log.Logger, dir string, id ulid.ULID, source metadata.SourceT return resid, errors.Wrap(err, "rewrite block") } resmeta.Thanos.SegmentFiles = GetSegmentFiles(resdir) - if err := metadata.Write(logger, resdir, &resmeta); err != nil { + if err := resmeta.WriteToDir(logger, resdir); err != nil { return resid, err } // TSDB may rewrite metadata in bdir. // TODO: This is not needed in newer TSDB code. See https://github.com/prometheus/tsdb/pull/637. 
- if err := metadata.Write(logger, bdir, meta); err != nil { + if err := meta.WriteToDir(logger, bdir); err != nil { return resid, err } return resid, nil @@ -435,9 +559,9 @@ func rewrite( series = []seriesRepair{} ) + var lset labels.Labels + var chks []chunks.Meta for all.Next() { - var lset labels.Labels - var chks []chunks.Meta id := all.At() if err := indexr.Series(id, &lset, &chks); err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index 42c33767f73f8..20ae1c5bc9284 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -637,8 +637,8 @@ func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { }, nil } -func (r BinaryReader) IndexVersion() int { - return r.indexVersion +func (r BinaryReader) IndexVersion() (int, error) { + return r.indexVersion, nil } // TODO(bwplotka): Get advantage of multi value offset fetch. @@ -871,7 +871,7 @@ func yoloString(b []byte) string { return *((*string)(unsafe.Pointer(&b))) } -func (r BinaryReader) LabelNames() []string { +func (r BinaryReader) LabelNames() ([]string, error) { allPostingsKeyName, _ := index.AllPostingsKey() labelNames := make([]string, 0, len(r.postings)) for name := range r.postings { @@ -882,7 +882,7 @@ func (r BinaryReader) LabelNames() []string { labelNames = append(labelNames, name) } sort.Strings(labelNames) - return labelNames + return labelNames, nil } func (r *BinaryReader) Close() error { return r.c.Close() } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go index dbbe335deb883..657427bd6f707 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/header.go @@ -18,7 +18,7 @@ type Reader interface { io.Closer // IndexVersion returns version of index. - IndexVersion() int + IndexVersion() (int, error) // PostingsOffset returns start and end offsets of postings for given name and value. // The end offset might be bigger than the actual posting ending, but not larger than the whole index file. @@ -36,5 +36,5 @@ type Reader interface { LabelValues(name string) ([]string, error) // LabelNames returns all label names. - LabelNames() []string + LabelNames() ([]string, error) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go new file mode 100644 index 0000000000000..e9b9dc20bdc6b --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -0,0 +1,273 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package indexheader + +import ( + "context" + "os" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/tsdb/index" + "go.uber.org/atomic" + + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/objstore" +) + +// LazyBinaryReaderMetrics holds metrics tracked by LazyBinaryReader. 
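// Why IndexVersion and LabelNames now return an error (see the Reader interface change
// above): an eager, already-mmapped reader can answer from memory, but a lazily loaded
// reader may have to (re)load the index-header on first use and must be able to surface
// that failure. A reduced sketch with hypothetical types (eagerReader, lazyishReader):
package sketch

import "errors"

type reader interface {
	IndexVersion() (int, error)
	LabelNames() ([]string, error)
}

type eagerReader struct {
	version int
	names   []string
}

func (r eagerReader) IndexVersion() (int, error)    { return r.version, nil }
func (r eagerReader) LabelNames() ([]string, error) { return r.names, nil }

type lazyishReader struct {
	loaded *eagerReader
	load   func() (*eagerReader, error)
}

func (r *lazyishReader) ensure() error {
	if r.loaded != nil {
		return nil
	}
	if r.load == nil {
		return errors.New("no loader configured")
	}
	l, err := r.load()
	if err != nil {
		return err
	}
	r.loaded = l
	return nil
}

func (r *lazyishReader) IndexVersion() (int, error) {
	if err := r.ensure(); err != nil {
		return 0, err
	}
	return r.loaded.IndexVersion()
}

func (r *lazyishReader) LabelNames() ([]string, error) {
	if err := r.ensure(); err != nil {
		return nil, err
	}
	return r.loaded.LabelNames()
}

// Both implementations satisfy the error-returning interface.
var (
	_ reader = eagerReader{}
	_ reader = &lazyishReader{}
)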
+type LazyBinaryReaderMetrics struct { + loadCount prometheus.Counter + loadFailedCount prometheus.Counter + unloadCount prometheus.Counter + unloadFailedCount prometheus.Counter + loadDuration prometheus.Histogram +} + +// NewLazyBinaryReaderMetrics makes new LazyBinaryReaderMetrics. +func NewLazyBinaryReaderMetrics(reg prometheus.Registerer) *LazyBinaryReaderMetrics { + return &LazyBinaryReaderMetrics{ + loadCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_load_total", + Help: "Total number of index-header lazy load operations.", + }), + loadFailedCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_load_failed_total", + Help: "Total number of failed index-header lazy load operations.", + }), + unloadCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_unload_total", + Help: "Total number of index-header lazy unload operations.", + }), + unloadFailedCount: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "indexheader_lazy_unload_failed_total", + Help: "Total number of failed index-header lazy unload operations.", + }), + loadDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "indexheader_lazy_load_duration_seconds", + Help: "Duration of the index-header lazy loading in seconds.", + Buckets: []float64{0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5}, + }), + } +} + +// LazyBinaryReader wraps BinaryReader and loads (mmap) the index-header only upon +// the first Reader function is called. +type LazyBinaryReader struct { + ctx context.Context + logger log.Logger + bkt objstore.BucketReader + dir string + filepath string + id ulid.ULID + postingOffsetsInMemSampling int + metrics *LazyBinaryReaderMetrics + onClosed func(*LazyBinaryReader) + + readerMx sync.RWMutex + reader *BinaryReader + readerErr error + + // Keep track of the last time it was used. + usedAt *atomic.Int64 +} + +// NewLazyBinaryReader makes a new LazyBinaryReader. If the index-header does not exist +// on the local disk at dir location, this function will build it downloading required +// sections from the full index stored in the bucket. However, this function doesn't load +// (mmap) the index-header; it will be loaded at first Reader function call. +func NewLazyBinaryReader( + ctx context.Context, + logger log.Logger, + bkt objstore.BucketReader, + dir string, + id ulid.ULID, + postingOffsetsInMemSampling int, + metrics *LazyBinaryReaderMetrics, + onClosed func(*LazyBinaryReader), +) (*LazyBinaryReader, error) { + filepath := filepath.Join(dir, id.String(), block.IndexHeaderFilename) + + // If the index-header doesn't exist we should download it. + if _, err := os.Stat(filepath); err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrap(err, "read index header") + } + + level.Debug(logger).Log("msg", "the index-header doesn't exist on disk; recreating", "path", filepath) + + start := time.Now() + if err := WriteBinary(ctx, bkt, id, filepath); err != nil { + return nil, errors.Wrap(err, "write index header") + } + + level.Debug(logger).Log("msg", "built index-header file", "path", filepath, "elapsed", time.Since(start)) + } + + return &LazyBinaryReader{ + ctx: ctx, + logger: logger, + bkt: bkt, + dir: dir, + filepath: filepath, + id: id, + postingOffsetsInMemSampling: postingOffsetsInMemSampling, + metrics: metrics, + usedAt: atomic.NewInt64(time.Now().UnixNano()), + onClosed: onClosed, + }, nil +} + +// Close implements Reader. 
It unloads the index-header from memory (releasing the mmap +// area), but a subsequent call to any other Reader function will automatically reload it. +func (r *LazyBinaryReader) Close() error { + if r.onClosed != nil { + defer r.onClosed(r) + } + + return r.unload() +} + +// IndexVersion implements Reader. +func (r *LazyBinaryReader) IndexVersion() (int, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return 0, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.IndexVersion() +} + +// PostingsOffset implements Reader. +func (r *LazyBinaryReader) PostingsOffset(name string, value string) (index.Range, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return index.Range{}, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.PostingsOffset(name, value) +} + +// LookupSymbol implements Reader. +func (r *LazyBinaryReader) LookupSymbol(o uint32) (string, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return "", err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.LookupSymbol(o) +} + +// LabelValues implements Reader. +func (r *LazyBinaryReader) LabelValues(name string) ([]string, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return nil, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.LabelValues(name) +} + +// LabelNames implements Reader. +func (r *LazyBinaryReader) LabelNames() ([]string, error) { + r.readerMx.RLock() + defer r.readerMx.RUnlock() + + if err := r.load(); err != nil { + return nil, err + } + + r.usedAt.Store(time.Now().UnixNano()) + return r.reader.LabelNames() +} + +// load ensures the underlying binary index-header reader has been successfully loaded. Returns +// an error on failure. This function MUST be called with the read lock already acquired. +func (r *LazyBinaryReader) load() error { + // Nothing to do if we already tried loading it. + if r.reader != nil { + return nil + } + if r.readerErr != nil { + return r.readerErr + } + + // Take the write lock to ensure we'll try to load it only once. Take again + // the read lock once done. + r.readerMx.RUnlock() + r.readerMx.Lock() + defer r.readerMx.RLock() + defer r.readerMx.Unlock() + + // Ensure none else tried to load it in the meanwhile. + if r.reader != nil { + return nil + } + if r.readerErr != nil { + return r.readerErr + } + + level.Debug(r.logger).Log("msg", "lazy loading index-header file", "path", r.filepath) + r.metrics.loadCount.Inc() + startTime := time.Now() + + reader, err := NewBinaryReader(r.ctx, r.logger, r.bkt, r.dir, r.id, r.postingOffsetsInMemSampling) + if err != nil { + r.metrics.loadFailedCount.Inc() + r.readerErr = err + return errors.Wrapf(err, "lazy load index-header file at %s", r.filepath) + } + + r.reader = reader + level.Debug(r.logger).Log("msg", "lazy loaded index-header file", "path", r.filepath, "elapsed", time.Since(startTime)) + r.metrics.loadDuration.Observe(time.Since(startTime).Seconds()) + + return nil +} + +// unload closes underlying BinaryReader. Calling this function on a already unloaded reader is a no-op. +func (r *LazyBinaryReader) unload() error { + // Always update the used timestamp so that the pool will not call unload() again until the next + // idle timeout is hit. 
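// The lock-upgrade dance in load() above, reduced to a stdlib sketch: callers enter with
// the read lock held, the loader temporarily trades it for the write lock, re-checks state
// in case another goroutine won the race, and restores the read lock on the way out so the
// caller's deferred RUnlock stays balanced. Hypothetical type, same defer ordering.
package sketch

import "sync"

type lazy struct {
	mx     sync.RWMutex
	value  *int
	initFn func() (*int, error)
	err    error
}

// get mimics the public Reader methods: no lock held on entry.
func (l *lazy) get() (*int, error) {
	l.mx.RLock()
	defer l.mx.RUnlock()

	if err := l.load(); err != nil {
		return nil, err
	}
	return l.value, nil
}

// load MUST be called with the read lock held.
func (l *lazy) load() error {
	if l.value != nil || l.err != nil {
		return l.err
	}

	// Upgrade: drop the read lock, take the write lock, and re-acquire the read lock
	// before returning. Deferred calls run LIFO, so Unlock happens before RLock.
	l.mx.RUnlock()
	l.mx.Lock()
	defer l.mx.RLock()
	defer l.mx.Unlock()

	// Someone else may have loaded (or failed) while we were unlocked.
	if l.value != nil || l.err != nil {
		return l.err
	}

	v, err := l.initFn()
	if err != nil {
		l.err = err
		return err
	}
	l.value = v
	return nil
}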
+ r.usedAt.Store(time.Now().UnixNano()) + + r.readerMx.Lock() + defer r.readerMx.Unlock() + + if r.reader == nil { + return nil + } + + r.metrics.unloadCount.Inc() + if err := r.reader.Close(); err != nil { + r.metrics.unloadFailedCount.Inc() + return err + } + + r.reader = nil + return nil +} + +func (r *LazyBinaryReader) lastUsedAt() int64 { + return r.usedAt.Load() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go new file mode 100644 index 0000000000000..660ae4853a394 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -0,0 +1,147 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package indexheader + +import ( + "context" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + + "github.com/thanos-io/thanos/pkg/objstore" +) + +// ReaderPool is used to istantiate new index-header readers and keep track of them. +// When the lazy reader is enabled, the pool keeps track of all instantiated readers +// and automatically close them once the idle timeout is reached. A closed lazy reader +// will be automatically re-opened upon next usage. +type ReaderPool struct { + lazyReaderEnabled bool + lazyReaderIdleTimeout time.Duration + lazyReaderMetrics *LazyBinaryReaderMetrics + logger log.Logger + + // Channel used to signal once the pool is closing. + close chan struct{} + + // Keep track of all readers managed by the pool. + lazyReadersMx sync.Mutex + lazyReaders map[*LazyBinaryReader]struct{} +} + +// NewReaderPool makes a new ReaderPool. +func NewReaderPool(logger log.Logger, lazyReaderEnabled bool, lazyReaderIdleTimeout time.Duration, reg prometheus.Registerer) *ReaderPool { + p := &ReaderPool{ + logger: logger, + lazyReaderEnabled: lazyReaderEnabled, + lazyReaderIdleTimeout: lazyReaderIdleTimeout, + lazyReaderMetrics: NewLazyBinaryReaderMetrics(reg), + lazyReaders: make(map[*LazyBinaryReader]struct{}), + close: make(chan struct{}), + } + + // Start a goroutine to close idle readers (only if required). + if p.lazyReaderEnabled && p.lazyReaderIdleTimeout > 0 { + checkFreq := p.lazyReaderIdleTimeout / 10 + + go func() { + for { + select { + case <-p.close: + return + case <-time.After(checkFreq): + p.closeIdleReaders() + } + } + }() + } + + return p +} + +// NewBinaryReader creates and returns a new binary reader. If the pool has been configured +// with lazy reader enabled, this function will return a lazy reader. The returned lazy reader +// is tracked by the pool and automatically closed once the idle timeout expires. +func (p *ReaderPool) NewBinaryReader(ctx context.Context, logger log.Logger, bkt objstore.BucketReader, dir string, id ulid.ULID, postingOffsetsInMemSampling int) (Reader, error) { + var reader Reader + var err error + + if p.lazyReaderEnabled { + reader, err = NewLazyBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling, p.lazyReaderMetrics, p.onLazyReaderClosed) + } else { + reader, err = NewBinaryReader(ctx, logger, bkt, dir, id, postingOffsetsInMemSampling) + } + + if err != nil { + return nil, err + } + + // Keep track of lazy readers only if required. 
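// The idle-reaper goroutine NewReaderPool starts above, as a stdlib sketch: wake up at a
// fraction of the idle timeout, sweep, and exit as soon as the pool's close channel is
// closed. A time.Ticker is used here instead of time.After, which is an editorial
// substitution for the sketch, not the upstream choice.
package sketch

import "time"

type pool struct {
	idleTimeout time.Duration
	closeCh     chan struct{}
}

func (p *pool) startJanitor(sweep func()) {
	checkFreq := p.idleTimeout / 10

	go func() {
		t := time.NewTicker(checkFreq)
		defer t.Stop()
		for {
			select {
			case <-p.closeCh:
				return // pool closed: stop checking for idle readers
			case <-t.C:
				sweep()
			}
		}
	}()
}

func (p *pool) shutdown() {
	close(p.closeCh) // signals the janitor goroutine to exit
}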
+ if p.lazyReaderEnabled && p.lazyReaderIdleTimeout > 0 { + p.lazyReadersMx.Lock() + p.lazyReaders[reader.(*LazyBinaryReader)] = struct{}{} + p.lazyReadersMx.Unlock() + } + + return reader, err +} + +// Close the pool and stop checking for idle readers. No reader tracked by this pool +// will be closed. It's the caller responsibility to close readers. +func (p *ReaderPool) Close() { + close(p.close) +} + +func (p *ReaderPool) closeIdleReaders() { + for _, r := range p.getIdleReaders() { + // Closing an already closed reader is a no-op, so we close it and just update + // the last timestamp on success. If it will be still be idle the next time this + // function is called, we'll try to close it again and will just be a no-op. + // + // Due to concurrency, the current implementation may close a reader which was + // use between when the list of idle readers has been computed and now. This is + // an edge case we're willing to accept, to not further complicate the logic. + if err := r.unload(); err != nil { + level.Warn(p.logger).Log("msg", "failed to close idle index-header reader", "err", err) + } + } +} + +func (p *ReaderPool) getIdleReaders() []*LazyBinaryReader { + p.lazyReadersMx.Lock() + defer p.lazyReadersMx.Unlock() + + var idle []*LazyBinaryReader + threshold := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano() + + for r := range p.lazyReaders { + if r.lastUsedAt() < threshold { + idle = append(idle, r) + } + } + + return idle +} + +func (p *ReaderPool) isTracking(r *LazyBinaryReader) bool { + p.lazyReadersMx.Lock() + defer p.lazyReadersMx.Unlock() + + _, ok := p.lazyReaders[r] + return ok +} + +func (p *ReaderPool) onLazyReaderClosed(r *LazyBinaryReader) { + p.lazyReadersMx.Lock() + defer p.lazyReadersMx.Unlock() + + // When this function is called, it means the reader has been closed NOT because was idle + // but because the consumer closed it. By contract, a reader closed by the consumer can't + // be used anymore, so we can automatically remove it from the pool. + delete(p.lazyReaders, r) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go deleted file mode 100644 index 5f2a9f04adc86..0000000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/deletionmark.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package metadata - -import ( - "context" - "encoding/json" - "io/ioutil" - "path" - - "github.com/go-kit/kit/log" - "github.com/oklog/ulid" - "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -const ( - // DeletionMarkFilename is the known json filename to store details about when block is marked for deletion. - DeletionMarkFilename = "deletion-mark.json" - - // DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos. - DeletionMarkVersion1 = 1 -) - -// ErrorDeletionMarkNotFound is the error when deletion-mark.json file is not found. -var ErrorDeletionMarkNotFound = errors.New("deletion-mark.json not found") - -// ErrorUnmarshalDeletionMark is the error when unmarshalling deletion-mark.json file. -// This error can occur because deletion-mark.json has been partially uploaded to block storage -// or the deletion-mark.json file is not a valid json file. -var ErrorUnmarshalDeletionMark = errors.New("unmarshal deletion-mark.json") - -// DeletionMark stores block id and when block was marked for deletion. 
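// The idle test behind getIdleReaders above, sketched with sync/atomic: each reader
// records its last use as a UnixNano value in an atomic int64, and a sweep compares every
// reader against one threshold computed as "now minus the idle timeout".
package sketch

import (
	"sync/atomic"
	"time"
)

type tracked struct {
	usedAt int64 // UnixNano, accessed atomically
}

func (t *tracked) touch() { atomic.StoreInt64(&t.usedAt, time.Now().UnixNano()) }

func idleReaders(all []*tracked, idleTimeout time.Duration) []*tracked {
	threshold := time.Now().Add(-idleTimeout).UnixNano()

	var idle []*tracked
	for _, t := range all {
		if atomic.LoadInt64(&t.usedAt) < threshold {
			idle = append(idle, t)
		}
	}
	return idle
}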
-type DeletionMark struct { - // ID of the tsdb block. - ID ulid.ULID `json:"id"` - - // DeletionTime is a unix timestamp of when the block was marked to be deleted. - DeletionTime int64 `json:"deletion_time"` - - // Version of the file. - Version int `json:"version"` -} - -// ReadDeletionMark reads the given deletion mark file from /deletion-mark.json in bucket. -func ReadDeletionMark(ctx context.Context, bkt objstore.InstrumentedBucketReader, logger log.Logger, dir string) (*DeletionMark, error) { - deletionMarkFile := path.Join(dir, DeletionMarkFilename) - - r, err := bkt.ReaderWithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, deletionMarkFile) - if err != nil { - if bkt.IsObjNotFoundErr(err) { - return nil, ErrorDeletionMarkNotFound - } - return nil, errors.Wrapf(err, "get file: %s", deletionMarkFile) - } - - defer runutil.CloseWithLogOnErr(logger, r, "close bkt deletion-mark reader") - - metaContent, err := ioutil.ReadAll(r) - if err != nil { - return nil, errors.Wrapf(err, "read file: %s", deletionMarkFile) - } - - deletionMark := DeletionMark{} - if err := json.Unmarshal(metaContent, &deletionMark); err != nil { - return nil, errors.Wrapf(ErrorUnmarshalDeletionMark, "file: %s; err: %v", deletionMarkFile, err.Error()) - } - - if deletionMark.Version != DeletionMarkVersion1 { - return nil, errors.Errorf("unexpected deletion-mark file version %d", deletionMark.Version) - } - - return &deletionMark, nil -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go new file mode 100644 index 0000000000000..b3c8b9d1f054b --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/markers.go @@ -0,0 +1,119 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package metadata + +import ( + "context" + "encoding/json" + "io/ioutil" + "path" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" +) + +const ( + // DeletionMarkFilename is the known json filename for optional file storing details about when block is marked for deletion. + // If such file is present in block dir, it means the block is meant to be deleted after certain delay. + DeletionMarkFilename = "deletion-mark.json" + // NoCompactMarkFilename is the known json filename for optional file storing details about why block has to be excluded from compaction. + // If such file is present in block dir, it means the block has to excluded from compaction (both vertical and horizontal) or rewrite (e.g deletions). + NoCompactMarkFilename = "no-compact-mark.json" + + // DeletionMarkVersion1 is the version of deletion-mark file supported by Thanos. + DeletionMarkVersion1 = 1 + // NoCompactMarkVersion1 is the version of no-compact-mark file supported by Thanos. + NoCompactMarkVersion1 = 1 +) + +var ( + // ErrorMarkerNotFound is the error when marker file is not found. + ErrorMarkerNotFound = errors.New("marker not found") + // ErrorUnmarshalMarker is the error when unmarshalling marker JSON file. + // This error can occur because marker has been partially uploaded to block storage + // or the marker file is not a valid json file. + ErrorUnmarshalMarker = errors.New("unmarshal marker JSON") +) + +type Marker interface { + markerFilename() string +} + +// DeletionMark stores block id and when block was marked for deletion. +type DeletionMark struct { + // ID of the tsdb block. 
+ ID ulid.ULID `json:"id"` + // Version of the file. + Version int `json:"version"` + // Details is a human readable string giving details of reason. + Details string `json:"details,omitempty"` + + // DeletionTime is a unix timestamp of when the block was marked to be deleted. + DeletionTime int64 `json:"deletion_time"` +} + +func (m *DeletionMark) markerFilename() string { return DeletionMarkFilename } + +// NoCompactReason is a reason for a block to be excluded from compaction. +type NoCompactReason string + +const ( + // ManualNoCompactReason is a custom reason of excluding from compaction that should be added when no-compact mark is added for unknown/user specified reason. + ManualNoCompactReason NoCompactReason = "manual" + // IndexSizeExceedingNoCompactReason is a reason of index being too big (for example exceeding 64GB limit: https://github.com/thanos-io/thanos/issues/1424) + // This reason can be ignored when vertical block sharding will be implemented. + IndexSizeExceedingNoCompactReason = "index-size-exceeding" +) + +// NoCompactMark marker stores reason of block being excluded from compaction if needed. +type NoCompactMark struct { + // ID of the tsdb block. + ID ulid.ULID `json:"id"` + // Version of the file. + Version int `json:"version"` + // Details is a human readable string giving details of reason. + Details string `json:"details,omitempty"` + + // NoCompactTime is a unix timestamp of when the block was marked for no compact. + NoCompactTime int64 `json:"no_compact_time"` + Reason NoCompactReason `json:"reason"` +} + +func (n *NoCompactMark) markerFilename() string { return NoCompactMarkFilename } + +// ReadMarker reads the given mark file from /.json in bucket. +func ReadMarker(ctx context.Context, logger log.Logger, bkt objstore.InstrumentedBucketReader, dir string, marker Marker) error { + markerFile := path.Join(dir, marker.markerFilename()) + r, err := bkt.ReaderWithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, markerFile) + if err != nil { + if bkt.IsObjNotFoundErr(err) { + return ErrorMarkerNotFound + } + return errors.Wrapf(err, "get file: %s", markerFile) + } + defer runutil.CloseWithLogOnErr(logger, r, "close bkt marker reader") + + metaContent, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "read file: %s", markerFile) + } + + if err := json.Unmarshal(metaContent, marker); err != nil { + return errors.Wrapf(ErrorUnmarshalMarker, "file: %s; err: %v", markerFile, err.Error()) + } + switch marker.markerFilename() { + case NoCompactMarkFilename: + if version := marker.(*NoCompactMark).Version; version != NoCompactMarkVersion1 { + return errors.Errorf("unexpected no-compact-mark file version %d, expected %d", version, NoCompactMarkVersion1) + } + case DeletionMarkFilename: + if version := marker.(*DeletionMark).Version; version != DeletionMarkVersion1 { + return errors.Errorf("unexpected deletion-mark file version %d, expected %d", version, DeletionMarkVersion1) + } + } + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go index c361b703e74a7..b47c28eba492a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go @@ -10,20 +10,27 @@ package metadata import ( "encoding/json" - "io/ioutil" + "fmt" + "io" "os" "path/filepath" "github.com/go-kit/kit/log" + "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" + 
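// Minimal stand-alone sketch of the marker scheme introduced above: each marker type names
// its own JSON file and carries a Version field, and a single read helper unmarshals into
// the concrete type and validates the version with a type switch. Object-storage access is
// omitted — readMarker here takes raw bytes, an assumption made for brevity.
package sketch

import (
	"encoding/json"
	"fmt"
)

type marker interface {
	markerFilename() string
}

type deletionMark struct {
	Version      int   `json:"version"`
	DeletionTime int64 `json:"deletion_time"`
}

func (*deletionMark) markerFilename() string { return "deletion-mark.json" }

type noCompactMark struct {
	Version int    `json:"version"`
	Reason  string `json:"reason"`
}

func (*noCompactMark) markerFilename() string { return "no-compact-mark.json" }

func readMarker(content []byte, m marker) error {
	if err := json.Unmarshal(content, m); err != nil {
		return fmt.Errorf("unmarshal %s: %w", m.markerFilename(), err)
	}
	// Each marker kind has its own supported version; validate per concrete type.
	switch v := m.(type) {
	case *deletionMark:
		if v.Version != 1 {
			return fmt.Errorf("unexpected %s version %d", v.markerFilename(), v.Version)
		}
	case *noCompactMark:
		if v.Version != 1 {
			return fmt.Errorf("unexpected %s version %d", v.markerFilename(), v.Version)
		}
	}
	return nil
}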
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/thanos-io/thanos/pkg/runutil" + "gopkg.in/yaml.v3" ) type SourceType string const ( + // TODO(bwplotka): Merge with pkg/component package. UnknownSource SourceType = "" SidecarSource SourceType = "sidecar" ReceiveSource SourceType = "receive" @@ -31,17 +38,17 @@ const ( CompactorRepairSource SourceType = "compactor.repair" RulerSource SourceType = "ruler" BucketRepairSource SourceType = "bucket.repair" + BucketRewriteSource SourceType = "bucket.rewrite" TestSource SourceType = "test" ) const ( // MetaFilename is the known JSON filename for meta information. MetaFilename = "meta.json" -) - -const ( - // MetaVersion is a enumeration of meta versions supported by Thanos. - MetaVersion1 = iota + 1 + // TSDBVersion1 is a enumeration of TSDB meta versions supported by Thanos. + TSDBVersion1 = 1 + // ThanosVersion1 is a enumeration of Thanos section of TSDB meta supported by Thanos. + ThanosVersion1 = 1 ) // Meta describes the a block's meta. It wraps the known TSDB meta structure and @@ -52,8 +59,17 @@ type Meta struct { Thanos Thanos `json:"thanos"` } +func (m *Meta) String() string { + return fmt.Sprintf("%s (min time: %d, max time: %d)", m.ULID, m.MinTime, m.MaxTime) +} + // Thanos holds block meta information specific to Thanos. type Thanos struct { + // Version of Thanos meta file. If none specified, 1 is assumed (since first version did not have explicit version specified). + Version int `json:"version,omitempty"` + + // Labels are the external labels identifying the producer as well as tenant. + // See https://thanos.io/tip/thanos/storage.md#external-labels for details. Labels map[string]string `json:"labels"` Downsample ThanosDownsample `json:"downsample"` @@ -61,7 +77,45 @@ type Thanos struct { Source SourceType `json:"source"` // List of segment files (in chunks directory), in sorted order. Optional. + // Deprecated. Use Files instead. SegmentFiles []string `json:"segment_files,omitempty"` + + // File is a sorted (by rel path) list of all files in block directory of this block known to TSDB. + // Sorted by relative path. + // Useful to avoid API call to get size of each file, as well as for debugging purposes. + // Optional, added in v0.17.0. + Files []File `json:"files,omitempty"` + + // Rewrites is present when any rewrite (deletion, relabel etc) were applied to this block. Optional. + Rewrites []Rewrite `json:"rewrites,omitempty"` +} + +type Rewrite struct { + // ULIDs of all source head blocks that went into the block. + Sources []ulid.ULID `json:"sources,omitempty"` + // Deletions if applied (in order). + DeletionsApplied []DeletionRequest `json:"deletions_applied,omitempty"` +} + +type Matchers []*labels.Matcher + +func (m *Matchers) UnmarshalYAML(value *yaml.Node) (err error) { + *m, err = parser.ParseMetricSelector(value.Value) + if err != nil { + return errors.Wrapf(err, "parse metric selector %v", value.Value) + } + return nil +} + +type DeletionRequest struct { + Matchers Matchers `json:"matchers" yaml:"matchers"` + Intervals tombstones.Intervals `json:"intervals,omitempty" yaml:"intervals,omitempty"` +} + +type File struct { + RelPath string `json:"rel_path"` + // SizeBytes is optional (e.g meta.json does not show size). 
+ SizeBytes int64 `json:"size_bytes,omitempty"` } type ThanosDownsample struct { @@ -71,7 +125,7 @@ type ThanosDownsample struct { // InjectThanos sets Thanos meta to the block meta JSON and saves it to the disk. // NOTE: It should be used after writing any block by any Thanos component, otherwise we will miss crucial metadata. func InjectThanos(logger log.Logger, bdir string, meta Thanos, downsampledMeta *tsdb.BlockMeta) (*Meta, error) { - newMeta, err := Read(bdir) + newMeta, err := ReadFromDir(bdir) if err != nil { return nil, errors.Wrap(err, "read new meta") } @@ -82,15 +136,15 @@ func InjectThanos(logger log.Logger, bdir string, meta Thanos, downsampledMeta * newMeta.Compaction = downsampledMeta.Compaction } - if err := Write(logger, bdir, newMeta); err != nil { + if err := newMeta.WriteToDir(logger, bdir); err != nil { return nil, errors.Wrap(err, "write new meta") } return newMeta, nil } -// Write writes the given meta into /meta.json. -func Write(logger log.Logger, dir string, meta *Meta) error { +// WriteToDir writes the encoded meta into /meta.json. +func (m Meta) WriteToDir(logger log.Logger, dir string) error { // Make any changes to the file appear atomic. path := filepath.Join(dir, MetaFilename) tmp := path + ".tmp" @@ -100,10 +154,7 @@ func Write(logger log.Logger, dir string, meta *Meta) error { return err } - enc := json.NewEncoder(f) - enc.SetIndent("", "\t") - - if err := enc.Encode(meta); err != nil { + if err := m.Write(f); err != nil { runutil.CloseWithLogOnErr(logger, f, "close meta") return err } @@ -113,6 +164,13 @@ func Write(logger log.Logger, dir string, meta *Meta) error { return renameFile(logger, tmp, path) } +// Write writes the given encoded meta to writer. +func (m Meta) Write(w io.Writer) error { + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + return enc.Encode(&m) +} + func renameFile(logger log.Logger, from, to string) error { if err := os.RemoveAll(to); err != nil { return err @@ -134,19 +192,41 @@ func renameFile(logger log.Logger, from, to string) error { return pdir.Close() } -// Read reads the given meta from /meta.json. -func Read(dir string) (*Meta, error) { - b, err := ioutil.ReadFile(filepath.Join(dir, MetaFilename)) +// ReadFromDir reads the given meta from /meta.json. +func ReadFromDir(dir string) (*Meta, error) { + f, err := os.Open(filepath.Join(dir, MetaFilename)) if err != nil { return nil, err } - var m Meta + return Read(f) +} + +// Read the block meta from the given reader. +func Read(rc io.ReadCloser) (_ *Meta, err error) { + defer runutil.ExhaustCloseWithErrCapture(&err, rc, "close meta JSON") - if err := json.Unmarshal(b, &m); err != nil { + var m Meta + if err = json.NewDecoder(rc).Decode(&m); err != nil { return nil, err } - if m.Version != MetaVersion1 { + + if m.Version != TSDBVersion1 { return nil, errors.Errorf("unexpected meta file version %d", m.Version) } + + version := m.Thanos.Version + if version == 0 { + // For compatibility. + version = ThanosVersion1 + } + + if version != ThanosVersion1 { + return nil, errors.Errorf("unexpected meta file Thanos section version %d", m.Version) + } + + if m.Thanos.Labels == nil { + // To avoid extra nil checks, allocate map here if empty. 
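// The compatibility rule Read applies above, as a runnable sketch: older meta.json files
// carry no thanos.version field, so a decoded zero is treated as version 1, and a missing
// labels object is replaced by an empty map so callers never need a nil check.
package main

import (
	"encoding/json"
	"fmt"
)

type thanosSection struct {
	Version int               `json:"version,omitempty"`
	Labels  map[string]string `json:"labels"`
}

func decodeThanos(raw []byte) (thanosSection, error) {
	var t thanosSection
	if err := json.Unmarshal(raw, &t); err != nil {
		return t, err
	}
	if t.Version == 0 {
		t.Version = 1 // legacy files carried no explicit version
	}
	if t.Labels == nil {
		t.Labels = map[string]string{} // avoid nil-map checks downstream
	}
	return t, nil
}

func main() {
	legacy, _ := decodeThanos([]byte(`{"labels": null}`))
	current, _ := decodeThanos([]byte(`{"version": 1, "labels": {"cluster": "eu1"}}`))
	fmt.Printf("%+v\n%+v\n", legacy, current)
}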
+ m.Thanos.Labels = make(map[string]string) + } return &m, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/writer.go b/vendor/github.com/thanos-io/thanos/pkg/block/writer.go new file mode 100644 index 0000000000000..995d8f72aedf6 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/writer.go @@ -0,0 +1,184 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package block + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunks" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" +) + +// Reader is like tsdb.BlockReader but without tombstones and size methods. +type Reader interface { + // Index returns an IndexReader over the block's data. + Index() (tsdb.IndexReader, error) + + // Chunks returns a ChunkReader over the block's data. + Chunks() (tsdb.ChunkReader, error) + + // Meta returns block metadata file. + Meta() tsdb.BlockMeta +} + +// SeriesWriter is interface for writing series into one or multiple Blocks. +// Statistics has to be counted by implementation. +type SeriesWriter interface { + tsdb.IndexWriter + tsdb.ChunkWriter +} + +// Writer is interface for creating block(s). +type Writer interface { + SeriesWriter + + Flush() (tsdb.BlockStats, error) +} + +type DiskWriter struct { + statsGatheringSeriesWriter + + bTmp, bDir string + logger log.Logger + closers []io.Closer +} + +const tmpForCreationBlockDirSuffix = ".tmp-for-creation" + +// NewDiskWriter allows to write single TSDB block to disk and returns statistics. +// Destination block directory has to exists. +func NewDiskWriter(ctx context.Context, logger log.Logger, bDir string) (_ *DiskWriter, err error) { + bTmp := bDir + tmpForCreationBlockDirSuffix + + d := &DiskWriter{ + bTmp: bTmp, + bDir: bDir, + logger: logger, + } + defer func() { + if err != nil { + err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(d.closers)).Err() + if err := os.RemoveAll(bTmp); err != nil { + level.Error(logger).Log("msg", "removed tmp folder after failed compaction", "err", err.Error()) + } + } + }() + + if err = os.RemoveAll(bTmp); err != nil { + return nil, err + } + if err = os.MkdirAll(bTmp, 0777); err != nil { + return nil, err + } + + chunkw, err := chunks.NewWriter(filepath.Join(bTmp, ChunksDirname)) + if err != nil { + return nil, errors.Wrap(err, "open chunk writer") + } + d.closers = append(d.closers, chunkw) + + // TODO(bwplotka): Setup instrumentedChunkWriter if we want to upstream this code. 
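// Stdlib-only sketch (hypothetical helper name) of the tmp-dir discipline NewDiskWriter and
// Flush follow above: write everything under <dir>.tmp-for-creation, remove the tmp dir via
// a deferred cleanup if any step fails, and only move the finished files into the
// destination at the end. The real code uses fileutil.Replace, which also copes with
// pre-existing destinations and syncing; a plain rename stands in here.
package sketch

import (
	"fmt"
	"os"
	"path/filepath"
)

func writeBlockAtomically(dir string, write func(tmpDir string) error) (err error) {
	tmp := dir + ".tmp-for-creation"

	defer func() {
		if err != nil {
			// Best effort: never leave a half-written tmp dir behind on failure.
			if rmErr := os.RemoveAll(tmp); rmErr != nil {
				err = fmt.Errorf("%w (and cleanup failed: %v)", err, rmErr)
			}
		}
	}()

	if err = os.RemoveAll(tmp); err != nil {
		return err
	}
	if err = os.MkdirAll(tmp, 0777); err != nil {
		return err
	}
	if err = write(tmp); err != nil {
		return err
	}

	// Block files written: make them visible by moving them out of the tmp dir.
	for _, name := range []string{"index", "chunks"} {
		if err = os.Rename(filepath.Join(tmp, name), filepath.Join(dir, name)); err != nil {
			return fmt.Errorf("replace %s: %w", name, err)
		}
	}
	return os.RemoveAll(tmp)
}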
+ + indexw, err := index.NewWriter(ctx, filepath.Join(bTmp, IndexFilename)) + if err != nil { + return nil, errors.Wrap(err, "open index writer") + } + d.closers = append(d.closers, indexw) + d.statsGatheringSeriesWriter = statsGatheringSeriesWriter{iw: indexw, cw: chunkw} + return d, nil +} + +func (d *DiskWriter) Flush() (_ tsdb.BlockStats, err error) { + defer func() { + if err != nil { + err = tsdb_errors.NewMulti(err, tsdb_errors.CloseAll(d.closers)).Err() + if err := os.RemoveAll(d.bTmp); err != nil { + level.Error(d.logger).Log("msg", "removed tmp folder failed after block(s) write", "err", err.Error()) + } + } + }() + df, err := fileutil.OpenDir(d.bTmp) + if err != nil { + return tsdb.BlockStats{}, errors.Wrap(err, "open temporary block dir") + } + defer func() { + if df != nil { + err = tsdb_errors.NewMulti(err, df.Close()).Err() + } + }() + + if err := df.Sync(); err != nil { + return tsdb.BlockStats{}, errors.Wrap(err, "sync temporary dir file") + } + + // Close temp dir before rename block dir (for windows platform). + if err = df.Close(); err != nil { + return tsdb.BlockStats{}, errors.Wrap(err, "close temporary dir") + } + df = nil + + if err := tsdb_errors.CloseAll(d.closers); err != nil { + d.closers = nil + return tsdb.BlockStats{}, err + } + d.closers = nil + + // Block files successfully written, make them visible by moving files from tmp dir. + if err := fileutil.Replace(filepath.Join(d.bTmp, IndexFilename), filepath.Join(d.bDir, IndexFilename)); err != nil { + return tsdb.BlockStats{}, errors.Wrap(err, "replace index file") + } + if err := fileutil.Replace(filepath.Join(d.bTmp, ChunksDirname), filepath.Join(d.bDir, ChunksDirname)); err != nil { + return tsdb.BlockStats{}, errors.Wrap(err, "replace chunks dir") + } + return d.stats, nil +} + +type statsGatheringSeriesWriter struct { + iw tsdb.IndexWriter + cw tsdb.ChunkWriter + + stats tsdb.BlockStats + symbols int64 +} + +func (s *statsGatheringSeriesWriter) AddSymbol(sym string) error { + if err := s.iw.AddSymbol(sym); err != nil { + return err + } + s.symbols++ + return nil +} + +func (s *statsGatheringSeriesWriter) AddSeries(ref uint64, l labels.Labels, chks ...chunks.Meta) error { + if err := s.iw.AddSeries(ref, l, chks...); err != nil { + return err + } + s.stats.NumSeries++ + return nil +} + +func (s *statsGatheringSeriesWriter) WriteChunks(chks ...chunks.Meta) error { + if err := s.cw.WriteChunks(chks...); err != nil { + return err + } + s.stats.NumChunks += uint64(len(chks)) + for _, chk := range chks { + s.stats.NumSamples += uint64(chk.Chunk.NumSamples()) + } + return nil +} + +func (s statsGatheringSeriesWriter) Close() error { + return tsdb_errors.NewMulti(s.iw.Close(), s.cw.Close()).Err() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 74e971d39a753..1046057039c2e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "io/ioutil" + "math" "os" "path/filepath" "sort" @@ -21,11 +22,12 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb" - terrors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" + 
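// The decorator idea behind statsGatheringSeriesWriter above, reduced: wrap the real
// writer, forward every call unchanged, and count what passed through so Flush can report
// totals without a second pass. The seriesWriter interface below is a cut-down stand-in,
// not the TSDB one.
package sketch

type seriesWriter interface {
	AddSeries(name string, samples int) error
}

type countingWriter struct {
	next seriesWriter

	numSeries  uint64
	numSamples uint64
}

func (c *countingWriter) AddSeries(name string, samples int) error {
	if err := c.next.AddSeries(name, samples); err != nil {
		return err // never count work the inner writer rejected
	}
	c.numSeries++
	c.numSamples += uint64(samples)
	return nil
}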
"github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/objstore" ) @@ -181,7 +183,7 @@ func (s *Syncer) GarbageCollect(ctx context.Context) error { delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) level.Info(s.logger).Log("msg", "marking outdated block for deletion", "block", id) - err := block.MarkForDeletion(delCtx, s.logger, s.bkt, id, s.metrics.blocksMarkedForDeletion) + err := block.MarkForDeletion(delCtx, s.logger, s.bkt, id, "outdated block", s.metrics.blocksMarkedForDeletion) cancel() if err != nil { s.metrics.garbageCollectionFailures.Inc() @@ -322,7 +324,7 @@ type Group struct { labels labels.Labels resolution int64 mtx sync.Mutex - blocks map[ulid.ULID]*metadata.Meta + metasByMinTime []*metadata.Meta acceptMalformedIndex bool enableVerticalCompaction bool compactions prometheus.Counter @@ -360,7 +362,6 @@ func NewGroup( key: key, labels: lset, resolution: resolution, - blocks: map[ulid.ULID]*metadata.Meta{}, acceptMalformedIndex: acceptMalformedIndex, enableVerticalCompaction: enableVerticalCompaction, compactions: compactions, @@ -390,7 +391,11 @@ func (cg *Group) Add(meta *metadata.Meta) error { if cg.resolution != meta.Thanos.Downsample.Resolution { return errors.New("block and group resolution do not match") } - cg.blocks[meta.ULID] = meta + + cg.metasByMinTime = append(cg.metasByMinTime, meta) + sort.Slice(cg.metasByMinTime, func(i, j int) bool { + return cg.metasByMinTime[i].MinTime < cg.metasByMinTime[j].MinTime + }) return nil } @@ -399,8 +404,8 @@ func (cg *Group) IDs() (ids []ulid.ULID) { cg.mtx.Lock() defer cg.mtx.Unlock() - for id := range cg.blocks { - ids = append(ids, id) + for _, m := range cg.metasByMinTime { + ids = append(ids, m.ULID) } sort.Slice(ids, func(i, j int) bool { return ids[i].Compare(ids[j]) < 0 @@ -413,13 +418,10 @@ func (cg *Group) MinTime() int64 { cg.mtx.Lock() defer cg.mtx.Unlock() - min := int64(0) - for _, b := range cg.blocks { - if b.MinTime < min { - min = b.MinTime - } + if len(cg.metasByMinTime) > 0 { + return cg.metasByMinTime[0].MinTime } - return min + return math.MaxInt64 } // MaxTime returns the max time across all group's blocks. @@ -427,10 +429,10 @@ func (cg *Group) MaxTime() int64 { cg.mtx.Lock() defer cg.mtx.Unlock() - max := int64(0) - for _, b := range cg.blocks { - if b.MaxTime < max { - max = b.MaxTime + max := int64(math.MinInt64) + for _, m := range cg.metasByMinTime { + if m.MaxTime > max { + max = m.MaxTime } } return max @@ -446,9 +448,35 @@ func (cg *Group) Resolution() int64 { return cg.resolution } +// Planner returns blocks to compact. +type Planner interface { + // Plan returns a block directories of blocks that should be compacted into single one. + // The blocks can be overlapping. The provided metadata has to be ordered by minTime. + Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) +} + +// Compactor provides compaction against an underlying storage of time series data. +// This is similar to tsdb.Compactor just without Plan method. +// TODO(bwplotka): Split the Planner from Compactor on upstream as well, so we can import it. +type Compactor interface { + // Write persists a Block into a directory. + // No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}. + Write(dest string, b tsdb.BlockReader, mint, maxt int64, parent *tsdb.BlockMeta) (ulid.ULID, error) + + // Compact runs compaction against the provided directories. Must + // only be called concurrently with results of Plan(). 
+ // Can optionally pass a list of already open blocks, + // to avoid having to reopen them. + // When resulting Block has 0 samples + // * No block is written. + // * The source dirs are marked Deletable. + // * Returns empty ulid.ULID{}. + Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) +} + // Compact plans and runs a single compaction against the group. The compacted result // is uploaded into the bucket the blocks were retrieved from. -func (cg *Group) Compact(ctx context.Context, dir string, comp tsdb.Compactor) (shouldRerun bool, compID ulid.ULID, rerr error) { +func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor) (shouldRerun bool, compID ulid.ULID, rerr error) { cg.compactionRunsStarted.Inc() subDir := filepath.Join(dir, cg.Key()) @@ -469,7 +497,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, comp tsdb.Compactor) ( return false, ulid.ULID{}, errors.Wrap(err, "create compaction group dir") } - shouldRerun, compID, err := cg.compact(ctx, subDir, comp) + shouldRerun, compID, err := cg.compact(ctx, subDir, planner, comp) if err != nil { cg.compactionFailures.Inc() return false, ulid.ULID{}, err @@ -515,7 +543,7 @@ func (e HaltError) Error() string { // IsHaltError returns true if the base error is a HaltError. // If a multierror is passed, any halt error will return true. func IsHaltError(err error) bool { - if multiErr, ok := errors.Cause(err).(terrors.MultiError); ok { + if multiErr, ok := errors.Cause(err).(errutil.MultiError); ok { for _, err := range multiErr { if _, ok := errors.Cause(err).(HaltError); ok { return true @@ -548,7 +576,7 @@ func (e RetryError) Error() string { // IsRetryError returns true if the base error is a RetryError. // If a multierror is passed, all errors must be retriable. func IsRetryError(err error) bool { - if multiErr, ok := errors.Cause(err).(terrors.MultiError); ok { + if multiErr, ok := errors.Cause(err).(errutil.MultiError); ok { for _, err := range multiErr { if _, ok := errors.Cause(err).(RetryError); !ok { return false @@ -561,22 +589,18 @@ func IsRetryError(err error) bool { return ok } -func (cg *Group) areBlocksOverlapping(include *metadata.Meta, excludeDirs ...string) error { +func (cg *Group) areBlocksOverlapping(include *metadata.Meta, exclude ...*metadata.Meta) error { var ( - metas []tsdb.BlockMeta - exclude = map[ulid.ULID]struct{}{} + metas []tsdb.BlockMeta + excludeMap = map[ulid.ULID]struct{}{} ) - for _, e := range excludeDirs { - id, err := ulid.Parse(filepath.Base(e)) - if err != nil { - return errors.Wrapf(err, "overlaps find dir %s", e) - } - exclude[id] = struct{}{} + for _, meta := range exclude { + excludeMap[meta.ULID] = struct{}{} } - for _, m := range cg.blocks { - if _, ok := exclude[m.ULID]; ok { + for _, m := range cg.metasByMinTime { + if _, ok := excludeMap[m.ULID]; ok { continue } metas = append(metas, m.BlockMeta) @@ -620,7 +644,7 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, return retry(errors.Wrapf(err, "download block %s", ie.id)) } - meta, err := metadata.Read(bdir) + meta, err := metadata.ReadFromDir(bdir) if err != nil { return errors.Wrapf(err, "read meta from %s", bdir) } @@ -647,13 +671,13 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, defer cancel() // TODO(bplotka): Issue with this will introduce overlap that will halt compactor. Automate that (fix duplicate overlaps caused by this). 
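// The Group bookkeeping change above, sketched: keeping a slice sorted by MinTime (instead
// of a map keyed by ULID) makes MinTime() the first element, keeps MaxTime() a simple scan,
// and hands the planner blocks already in time order. Hypothetical cut-down types.
package sketch

import (
	"math"
	"sort"
)

type blockMeta struct {
	MinTime, MaxTime int64
}

type group struct {
	metasByMinTime []blockMeta
}

func (g *group) add(m blockMeta) {
	g.metasByMinTime = append(g.metasByMinTime, m)
	sort.Slice(g.metasByMinTime, func(i, j int) bool {
		return g.metasByMinTime[i].MinTime < g.metasByMinTime[j].MinTime
	})
}

func (g *group) minTime() int64 {
	if len(g.metasByMinTime) > 0 {
		return g.metasByMinTime[0].MinTime
	}
	return math.MaxInt64 // empty group: no lower bound yet
}

func (g *group) maxTime() int64 {
	max := int64(math.MinInt64)
	for _, m := range g.metasByMinTime {
		if m.MaxTime > max {
			max = m.MaxTime
		}
	}
	return max
}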
- if err := block.MarkForDeletion(delCtx, logger, bkt, ie.id, blocksMarkedForDeletion); err != nil { + if err := block.MarkForDeletion(delCtx, logger, bkt, ie.id, "source of repaired block", blocksMarkedForDeletion); err != nil { return errors.Wrapf(err, "marking old block %s for deletion has failed", ie.id) } return nil } -func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) (shouldRerun bool, compID ulid.ULID, err error) { +func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor) (shouldRerun bool, compID ulid.ULID, err error) { cg.mtx.Lock() defer cg.mtx.Unlock() @@ -669,29 +693,16 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( overlappingBlocks = true } - // Planning a compaction works purely based on the meta.json files in our future group's dir. - // So we first dump all our memory block metas into the directory. - for _, meta := range cg.blocks { - bdir := filepath.Join(dir, meta.ULID.String()) - if err := os.MkdirAll(bdir, 0777); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "create planning block dir") - } - if err := metadata.Write(cg.logger, bdir, meta); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "write planning meta file") - } - } - - // Plan against the written meta.json files. - plan, err := comp.Plan(dir) + toCompact, err := planner.Plan(ctx, cg.metasByMinTime) if err != nil { return false, ulid.ULID{}, errors.Wrap(err, "plan compaction") } - if len(plan) == 0 { + if len(toCompact) == 0 { // Nothing to do. return false, ulid.ULID{}, nil } - level.Info(cg.logger).Log("msg", "compaction available and planned; downloading blocks", "plan", fmt.Sprintf("%v", plan)) + level.Info(cg.logger).Log("msg", "compaction available and planned; downloading blocks", "plan", fmt.Sprintf("%v", toCompact)) // Due to #183 we verify that none of the blocks in the plan have overlapping sources. // This is one potential source of how we could end up with duplicated chunks. @@ -700,71 +711,54 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( // Once we have a plan we need to download the actual data. begin := time.Now() - for _, pdir := range plan { - meta, err := metadata.Read(pdir) - if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "read meta from %s", pdir) - } - + toCompactDirs := make([]string, 0, len(toCompact)) + for _, meta := range toCompact { + bdir := filepath.Join(dir, meta.ULID.String()) for _, s := range meta.Compaction.Sources { if _, ok := uniqueSources[s]; ok { - return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", plan)) + return false, ulid.ULID{}, halt(errors.Errorf("overlapping sources detected for plan %v", toCompact)) } uniqueSources[s] = struct{}{} } - id, err := ulid.Parse(filepath.Base(pdir)) - if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "plan dir %s", pdir) - } - - if meta.ULID.Compare(id) != 0 { - return false, ulid.ULID{}, errors.Errorf("mismatch between meta %s and dir %s", meta.ULID, id) - } - - if err := block.Download(ctx, cg.logger, cg.bkt, id, pdir); err != nil { - return false, ulid.ULID{}, retry(errors.Wrapf(err, "download block %s", id)) + if err := block.Download(ctx, cg.logger, cg.bkt, meta.ULID, bdir); err != nil { + return false, ulid.ULID{}, retry(errors.Wrapf(err, "download block %s", meta.ULID)) } // Ensure all input blocks are valid. 
- stats, err := block.GatherIndexIssueStats(cg.logger, filepath.Join(pdir, block.IndexFilename), meta.MinTime, meta.MaxTime) + stats, err := block.GatherIndexHealthStats(cg.logger, filepath.Join(bdir, block.IndexFilename), meta.MinTime, meta.MaxTime) if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "gather index issues for block %s", pdir) + return false, ulid.ULID{}, errors.Wrapf(err, "gather index issues for block %s", bdir) } if err := stats.CriticalErr(); err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", pdir, meta.Compaction.Level, meta.Thanos.Labels)) + return false, ulid.ULID{}, halt(errors.Wrapf(err, "block with not healthy index found %s; Compaction level %v; Labels: %v", bdir, meta.Compaction.Level, meta.Thanos.Labels)) } if err := stats.Issue347OutsideChunksErr(); err != nil { - return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", pdir), meta.ULID) + return false, ulid.ULID{}, issue347Error(errors.Wrapf(err, "invalid, but reparable block %s", bdir), meta.ULID) } if err := stats.PrometheusIssue5372Err(); !cg.acceptMalformedIndex && err != nil { return false, ulid.ULID{}, errors.Wrapf(err, - "block id %s, try running with --debug.accept-malformed-index", id) + "block id %s, try running with --debug.accept-malformed-index", meta.ULID) } + toCompactDirs = append(toCompactDirs, bdir) } - level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", plan), "duration", time.Since(begin)) + level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "plan", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin)) begin = time.Now() - - compID, err = comp.Compact(dir, plan, nil) + compID, err = comp.Compact(dir, toCompactDirs, nil) if err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", plan)) + return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", toCompactDirs)) } if compID == (ulid.ULID{}) { // Prometheus compactor found that the compacted block would have no samples. 
- level.Info(cg.logger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", fmt.Sprintf("%v", plan)) - for _, block := range plan { - meta, err := metadata.Read(block) - if err != nil { - level.Warn(cg.logger).Log("msg", "failed to read meta for block", "block", block) - continue - } + level.Info(cg.logger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", fmt.Sprintf("%v", toCompactDirs)) + for _, meta := range toCompact { if meta.Stats.NumSamples == 0 { - if err := cg.deleteBlock(block); err != nil { - level.Warn(cg.logger).Log("msg", "failed to mark for deletion an empty block found during compaction", "block", block) + if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String())); err != nil { + level.Warn(cg.logger).Log("msg", "failed to mark for deletion an empty block found during compaction", "block", meta.ULID) } } } @@ -776,7 +770,7 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( cg.verticalCompactions.Inc() } level.Info(cg.logger).Log("msg", "compacted blocks", "new", compID, - "blocks", fmt.Sprintf("%v", plan), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks) + "blocks", fmt.Sprintf("%v", toCompactDirs), "duration", time.Since(begin), "overlapping_blocks", overlappingBlocks) bdir := filepath.Join(dir, compID.String()) index := filepath.Join(bdir, block.IndexFilename) @@ -803,7 +797,7 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( // Ensure the output block is not overlapping with anything else, // unless vertical compaction is enabled. if !cg.enableVerticalCompaction { - if err := cg.areBlocksOverlapping(newMeta, plan...); err != nil { + if err := cg.areBlocksOverlapping(newMeta, toCompact...); err != nil { return false, ulid.ULID{}, halt(errors.Wrapf(err, "resulted compacted block %s overlaps with something", bdir)) } } @@ -818,23 +812,17 @@ func (cg *Group) compact(ctx context.Context, dir string, comp tsdb.Compactor) ( // Mark for deletion the blocks we just compacted from the group and bucket so they do not get included // into the next planning cycle. // Eventually the block we just uploaded should get synced into the group again (including sync-delay). 
- for _, b := range plan { - if err := cg.deleteBlock(b); err != nil { + for _, meta := range toCompact { + if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String())); err != nil { return false, ulid.ULID{}, retry(errors.Wrapf(err, "mark old block for deletion from bucket")) } cg.groupGarbageCollectedBlocks.Inc() } - return true, compID, nil } -func (cg *Group) deleteBlock(b string) error { - id, err := ulid.Parse(filepath.Base(b)) - if err != nil { - return errors.Wrapf(err, "plan dir %s", b) - } - - if err := os.RemoveAll(b); err != nil { +func (cg *Group) deleteBlock(id ulid.ULID, bdir string) error { + if err := os.RemoveAll(bdir); err != nil { return errors.Wrapf(err, "remove old block dir %s", id) } @@ -842,7 +830,7 @@ func (cg *Group) deleteBlock(b string) error { delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() level.Info(cg.logger).Log("msg", "marking compacted block for deletion", "old_block", id) - if err := block.MarkForDeletion(delCtx, cg.logger, cg.bkt, id, cg.blocksMarkedForDeletion); err != nil { + if err := block.MarkForDeletion(delCtx, cg.logger, cg.bkt, id, "source of compacted block", cg.blocksMarkedForDeletion); err != nil { return errors.Wrapf(err, "mark block %s for deletion from bucket", id) } return nil @@ -853,7 +841,8 @@ type BucketCompactor struct { logger log.Logger sy *Syncer grouper Grouper - comp tsdb.Compactor + comp Compactor + planner Planner compactDir string bkt objstore.Bucket concurrency int @@ -864,7 +853,8 @@ func NewBucketCompactor( logger log.Logger, sy *Syncer, grouper Grouper, - comp tsdb.Compactor, + planner Planner, + comp Compactor, compactDir string, bkt objstore.Bucket, concurrency int, @@ -876,6 +866,7 @@ func NewBucketCompactor( logger: logger, sy: sy, grouper: grouper, + planner: planner, comp: comp, compactDir: compactDir, bkt: bkt, @@ -913,7 +904,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { go func() { defer wg.Done() for g := range groupChan { - shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.comp) + shouldRerunGroup, _, err := g.Compact(workCtx, c.compactDir, c.planner, c.comp) if err == nil { if shouldRerunGroup { mtx.Lock() @@ -962,7 +953,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { level.Info(c.logger).Log("msg", "start of compactions") // Send all groups found during this pass to the compaction workers. - var groupErrs terrors.MultiError + var groupErrs errutil.MultiError groupLoop: for _, g := range groups { select { @@ -984,7 +975,7 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { workCtxCancel() if len(groupErrs) > 0 { - return groupErrs + return groupErrs.Err() } if finishedAllGroups { @@ -994,3 +985,50 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { level.Info(c.logger).Log("msg", "compaction iterations done") return nil } + +var _ block.MetadataFilter = &GatherNoCompactionMarkFilter{} + +// GatherNoCompactionMarkFilter is a block.Fetcher filter that passes all metas. While doing it, it gathers all no-compact-mark.json markers. +// Not go routine safe. +// TODO(bwplotka): Add unit test. +type GatherNoCompactionMarkFilter struct { + logger log.Logger + bkt objstore.InstrumentedBucketReader + noCompactMarkedMap map[ulid.ULID]*metadata.NoCompactMark +} + +// NewGatherNoCompactionMarkFilter creates GatherNoCompactionMarkFilter. 
+func NewGatherNoCompactionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader) *GatherNoCompactionMarkFilter { + return &GatherNoCompactionMarkFilter{ + logger: logger, + bkt: bkt, + } +} + +// NoCompactMarkedBlocks returns block ids that were marked for no compaction. +func (f *GatherNoCompactionMarkFilter) NoCompactMarkedBlocks() map[ulid.ULID]*metadata.NoCompactMark { + return f.noCompactMarkedMap +} + +// Filter passes all metas, while gathering no compact markers. +func (f *GatherNoCompactionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec) error { + f.noCompactMarkedMap = make(map[ulid.ULID]*metadata.NoCompactMark) + + for id := range metas { + m := &metadata.NoCompactMark{} + // TODO(bwplotka): Hook up bucket cache here + reset API so we don't introduce API calls . + if err := metadata.ReadMarker(ctx, f.logger, f.bkt, id.String(), m); err != nil { + if errors.Cause(err) == metadata.ErrorMarkerNotFound { + continue + } + if errors.Cause(err) == metadata.ErrorUnmarshalMarker { + level.Warn(f.logger).Log("msg", "found partial no-compact-mark.json; if we will see it happening often for the same block, consider manually deleting no-compact-mark.json from the object storage", "block", id, "err", err) + continue + } + return err + } + synced.WithLabelValues(block.MarkedForNoCompactionMeta).Inc() + f.noCompactMarkedMap[id] = m + } + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index b07fcfb47e3fa..909252aee9d0d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -18,9 +18,9 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" - tsdberrors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/index" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/runutil" ) @@ -73,7 +73,7 @@ func Downsample( // Remove blockDir in case of errors. defer func() { if err != nil { - var merr tsdberrors.MultiError + var merr errutil.MultiError merr.Add(err) merr.Add(os.RemoveAll(blockDir)) err = merr.Err() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go index 8a7a3f4f4875e..1872091bfdaa4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go @@ -15,11 +15,11 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" - tsdberrors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/runutil" ) @@ -61,7 +61,7 @@ func NewStreamedBlockWriter( // We should close any opened Closer up to an error. 
defer func() { if err != nil { - var merr tsdberrors.MultiError + var merr errutil.MultiError merr.Add(err) for _, cl := range closers { merr.Add(cl.Close()) @@ -143,7 +143,7 @@ func (w *streamedBlockWriter) Close() error { } w.finalized = true - merr := tsdberrors.MultiError{} + merr := errutil.MultiError{} if w.ignoreFinalize { // Close open file descriptors anyway. @@ -201,12 +201,12 @@ func (w *streamedBlockWriter) syncDir() (err error) { // writeMetaFile writes meta file. func (w *streamedBlockWriter) writeMetaFile() error { - w.meta.Version = metadata.MetaVersion1 + w.meta.Version = metadata.TSDBVersion1 w.meta.Thanos.Source = metadata.CompactorSource w.meta.Thanos.SegmentFiles = block.GetSegmentFiles(w.blockDir) w.meta.Stats.NumChunks = w.totalChunks w.meta.Stats.NumSamples = w.totalSamples w.meta.Stats.NumSeries = w.seriesRefs - return metadata.Write(w.logger, w.blockDir, &w.meta) + return w.meta.WriteToDir(w.logger, w.blockDir) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go new file mode 100644 index 0000000000000..208d4832a4949 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -0,0 +1,303 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package compact + +import ( + "context" + "fmt" + "math" + "path/filepath" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/objstore" +) + +type tsdbBasedPlanner struct { + logger log.Logger + + ranges []int64 + + noCompBlocksFunc func() map[ulid.ULID]*metadata.NoCompactMark +} + +var _ Planner = &tsdbBasedPlanner{} + +// NewTSDBBasedPlanner is planner with the same functionality as Prometheus' TSDB. +// TODO(bwplotka): Consider upstreaming this to Prometheus. +// It's the same functionality just without accessing filesystem. +func NewTSDBBasedPlanner(logger log.Logger, ranges []int64) *tsdbBasedPlanner { + return &tsdbBasedPlanner{ + logger: logger, + ranges: ranges, + noCompBlocksFunc: func() map[ulid.ULID]*metadata.NoCompactMark { + return make(map[ulid.ULID]*metadata.NoCompactMark) + }, + } +} + +// NewPlanner is a default Thanos planner with the same functionality as Prometheus' TSDB plus special handling of excluded blocks. +// It's the same functionality just without accessing filesystem, and special handling of excluded blocks. 
+func NewPlanner(logger log.Logger, ranges []int64, noCompBlocks *GatherNoCompactionMarkFilter) *tsdbBasedPlanner { + return &tsdbBasedPlanner{logger: logger, ranges: ranges, noCompBlocksFunc: noCompBlocks.NoCompactMarkedBlocks} +} + +// TODO(bwplotka): Consider smarter algorithm, this prefers smaller iterative compactions vs big single one: https://github.com/thanos-io/thanos/issues/3405 +func (p *tsdbBasedPlanner) Plan(_ context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + return p.plan(p.noCompBlocksFunc(), metasByMinTime) +} + +func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompactMark, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + notExcludedMetasByMinTime := make([]*metadata.Meta, 0, len(metasByMinTime)) + for _, meta := range metasByMinTime { + if _, excluded := noCompactMarked[meta.ULID]; excluded { + continue + } + notExcludedMetasByMinTime = append(notExcludedMetasByMinTime, meta) + } + + res := selectOverlappingMetas(notExcludedMetasByMinTime) + if len(res) > 0 { + return res, nil + } + // No overlapping blocks, do compaction the usual way. + + // We do not include a recently producted block with max(minTime), so the block which was just uploaded to bucket. + // This gives users a window of a full block size maintenance if needed. + if _, excluded := noCompactMarked[metasByMinTime[len(metasByMinTime)-1].ULID]; !excluded { + notExcludedMetasByMinTime = notExcludedMetasByMinTime[:len(notExcludedMetasByMinTime)-1] + } + metasByMinTime = metasByMinTime[:len(metasByMinTime)-1] + res = append(res, selectMetas(p.ranges, noCompactMarked, metasByMinTime)...) + if len(res) > 0 { + return res, nil + } + + // Compact any blocks with big enough time range that have >5% tombstones. + for i := len(notExcludedMetasByMinTime) - 1; i >= 0; i-- { + meta := notExcludedMetasByMinTime[i] + if meta.MaxTime-meta.MinTime < p.ranges[len(p.ranges)/2] { + break + } + if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 { + return []*metadata.Meta{notExcludedMetasByMinTime[i]}, nil + } + } + + return nil, nil +} + +// selectMetas returns the dir metas that should be compacted into a single new block. +// If only a single block range is configured, the result is always nil. +// Copied and adjusted from https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L229. +func selectMetas(ranges []int64, noCompactMarked map[ulid.ULID]*metadata.NoCompactMark, metasByMinTime []*metadata.Meta) []*metadata.Meta { + if len(ranges) < 2 || len(metasByMinTime) < 1 { + return nil + } + highTime := metasByMinTime[len(metasByMinTime)-1].MinTime + + for _, iv := range ranges[1:] { + parts := splitByRange(metasByMinTime, iv) + if len(parts) == 0 { + continue + } + Outer: + for _, p := range parts { + // Do not select the range if it has a block whose compaction failed. + for _, m := range p { + if m.Compaction.Failed { + continue Outer + } + } + + if len(p) < 2 { + continue + } + + mint := p[0].MinTime + maxt := p[len(p)-1].MaxTime + + // Pick the range of blocks if it spans the full range (potentially with gaps) or is before the most recent block. + // This ensures we don't compact blocks prematurely when another one of the same size still would fits in the range + // after upload. + if maxt-mint != iv && maxt > highTime { + continue + } + + // Check if any of resulted blocks are excluded. 
Exclude them in a way that does not introduce gaps to the system + // as well as preserve the ranges that would be used if they were not excluded. + // This is meant as short-term workaround to create ability for marking some blocks to not be touched for compaction. + lastExcluded := 0 + for i, id := range p { + if _, excluded := noCompactMarked[id.ULID]; !excluded { + continue + } + if len(p[lastExcluded:i]) > 1 { + return p[lastExcluded:i] + } + lastExcluded = i + 1 + } + if len(p[lastExcluded:]) > 1 { + return p[lastExcluded:] + } + } + } + + return nil +} + +// selectOverlappingMetas returns all dirs with overlapping time ranges. +// It expects sorted input by mint and returns the overlapping dirs in the same order as received. +// Copied and adjusted from https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L268. +func selectOverlappingMetas(metasByMinTime []*metadata.Meta) []*metadata.Meta { + if len(metasByMinTime) < 2 { + return nil + } + var overlappingMetas []*metadata.Meta + globalMaxt := metasByMinTime[0].MaxTime + for i, m := range metasByMinTime[1:] { + if m.MinTime < globalMaxt { + if len(overlappingMetas) == 0 { + // When it is the first overlap, need to add the last one as well. + overlappingMetas = append(overlappingMetas, metasByMinTime[i]) + } + overlappingMetas = append(overlappingMetas, m) + } else if len(overlappingMetas) > 0 { + break + } + + if m.MaxTime > globalMaxt { + globalMaxt = m.MaxTime + } + } + return overlappingMetas +} + +// splitByRange splits the directories by the time range. The range sequence starts at 0. +// +// For example, if we have blocks [0-10, 10-20, 50-60, 90-100] and the split range tr is 30 +// it returns [0-10, 10-20], [50-60], [90-100]. +// Copied and adjusted from: https://github.com/prometheus/prometheus/blob/3d8826a3d42566684283a9b7f7e812e412c24407/tsdb/compact.go#L294. +func splitByRange(metasByMinTime []*metadata.Meta, tr int64) [][]*metadata.Meta { + var splitDirs [][]*metadata.Meta + + for i := 0; i < len(metasByMinTime); { + var ( + group []*metadata.Meta + t0 int64 + m = metasByMinTime[i] + ) + // Compute start of aligned time range of size tr closest to the current block's start. + if m.MinTime >= 0 { + t0 = tr * (m.MinTime / tr) + } else { + t0 = tr * ((m.MinTime - tr + 1) / tr) + } + + // Skip blocks that don't fall into the range. This can happen via mis-alignment or + // by being the multiple of the intended range. + if m.MaxTime > t0+tr { + i++ + continue + } + + // Add all metas to the current group that are within [t0, t0+tr]. + for ; i < len(metasByMinTime); i++ { + // Either the block falls into the next range or doesn't fit at all (checked above). + if metasByMinTime[i].MaxTime > t0+tr { + break + } + group = append(group, metasByMinTime[i]) + } + + if len(group) > 0 { + splitDirs = append(splitDirs, group) + } + } + + return splitDirs +} + +type largeTotalIndexSizeFilter struct { + *tsdbBasedPlanner + + bkt objstore.Bucket + markedForNoCompact prometheus.Counter + totalMaxIndexSizeBytes int64 +} + +var _ Planner = &largeTotalIndexSizeFilter{} + +// WithLargeTotalIndexSizeFilter wraps Planner with largeTotalIndexSizeFilter that checks the given plans and estimates total index size. +// When found, it marks block for no compaction by placing no-compact.json and updating cache. +// NOTE: The estimation is very rough as it assumes extreme cases of indexes sharing no bytes, thus summing all source index sizes. 
+// Adjust limit accordingly reducing to some % of actual limit you want to give. +// TODO(bwplotka): This is short term fix for https://github.com/thanos-io/thanos/issues/1424, replace with vertical block sharding https://github.com/thanos-io/thanos/pull/3390. +func WithLargeTotalIndexSizeFilter(with *tsdbBasedPlanner, bkt objstore.Bucket, totalMaxIndexSizeBytes int64, markedForNoCompact prometheus.Counter) *largeTotalIndexSizeFilter { + return &largeTotalIndexSizeFilter{tsdbBasedPlanner: with, bkt: bkt, totalMaxIndexSizeBytes: totalMaxIndexSizeBytes, markedForNoCompact: markedForNoCompact} +} + +func (t *largeTotalIndexSizeFilter) Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) { + noCompactMarked := t.noCompBlocksFunc() + copiedNoCompactMarked := make(map[ulid.ULID]*metadata.NoCompactMark, len(noCompactMarked)) + for k, v := range noCompactMarked { + copiedNoCompactMarked[k] = v + } + +PlanLoop: + for { + plan, err := t.plan(copiedNoCompactMarked, metasByMinTime) + if err != nil { + return nil, err + } + var totalIndexBytes, maxIndexSize int64 = 0, math.MinInt64 + var biggestIndex int + for i, p := range plan { + indexSize := int64(-1) + for _, f := range p.Thanos.Files { + if f.RelPath == block.IndexFilename { + indexSize = f.SizeBytes + } + } + if indexSize <= 0 { + // Get size from bkt instead. + attr, err := t.bkt.Attributes(ctx, filepath.Join(p.ULID.String(), block.IndexFilename)) + if err != nil { + return nil, errors.Wrapf(err, "get attr of %v", filepath.Join(p.ULID.String(), block.IndexFilename)) + } + indexSize = attr.Size + } + + if maxIndexSize < indexSize { + maxIndexSize = indexSize + biggestIndex = i + } + totalIndexBytes += indexSize + if totalIndexBytes >= t.totalMaxIndexSizeBytes { + // Marking blocks for no compact to limit size. + // TODO(bwplotka): Make sure to reset cache once this is done: https://github.com/thanos-io/thanos/issues/3408 + if err := block.MarkForNoCompact( + ctx, + t.logger, + t.bkt, + plan[biggestIndex].ULID, + metadata.IndexSizeExceedingNoCompactReason, + fmt.Sprintf("largeTotalIndexSizeFilter: Total compacted block's index size could exceed: %v with this block. See https://github.com/thanos-io/thanos/issues/1424", t.totalMaxIndexSizeBytes), + t.markedForNoCompact, + ); err != nil { + return nil, errors.Wrapf(err, "mark %v for no compaction", plan[biggestIndex].ULID.String()) + } + // Make sure wrapped planner exclude this block. + copiedNoCompactMarked[plan[biggestIndex].ULID] = &metadata.NoCompactMark{ID: plan[biggestIndex].ULID, Version: metadata.NoCompactMarkVersion1} + continue PlanLoop + } + } + // Planned blocks should not exceed limit. 
+ return plan, nil + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go b/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go index 8d1ba7d5fb727..703bad5dda422 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/retention.go @@ -5,6 +5,7 @@ package compact import ( "context" + "fmt" "time" "github.com/go-kit/kit/log" @@ -37,7 +38,7 @@ func ApplyRetentionPolicyByResolution( maxTime := time.Unix(m.MaxTime/1000, 0) if time.Now().After(maxTime.Add(retentionDuration)) { level.Info(logger).Log("msg", "applying retention: marking block for deletion", "id", id, "maxTime", maxTime.String()) - if err := block.MarkForDeletion(ctx, logger, bkt, id, blocksMarkedForDeletion); err != nil { + if err := block.MarkForDeletion(ctx, logger, bkt, id, fmt.Sprintf("block exceeding retention of %v", retentionDuration), blocksMarkedForDeletion); err != nil { return errors.Wrap(err, "delete block") } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/component/component.go b/vendor/github.com/thanos-io/thanos/pkg/component/component.go index c7451c4767f99..91ec2f9ec510f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/component/component.go +++ b/vendor/github.com/thanos-io/thanos/pkg/component/component.go @@ -91,6 +91,8 @@ func FromProto(storeType storepb.StoreType) StoreAPI { var ( Bucket = source{component: component{name: "bucket"}} Cleanup = source{component: component{name: "cleanup"}} + Mark = source{component: component{name: "mark"}} + Rewrite = source{component: component{name: "rewrite"}} Compact = source{component: component{name: "compact"}} Downsample = source{component: component{name: "downsample"}} Replicate = source{component: component{name: "replicate"}} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 75c3b02fd1983..2d11e1cf91843 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -13,9 +13,9 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - tsdberrors "github.com/prometheus/prometheus/tsdb/errors" "github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns" + "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" ) @@ -111,7 +111,7 @@ func GetQTypeName(addr string) (qtype string, name string) { // defaultPort is used for non-SRV records when a port is not supplied. func (p *Provider) Resolve(ctx context.Context, addrs []string) error { resolvedAddrs := map[string][]string{} - errs := tsdberrors.MultiError{} + errs := errutil.MultiError{} for _, addr := range addrs { var resolved []string diff --git a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go new file mode 100644 index 0000000000000..e51bf4554e9d7 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go @@ -0,0 +1,51 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errutil + +import ( + "bytes" + "fmt" +) + +// The MultiError type implements the error interface, and contains the +// Errors used to construct it. +type MultiError []error + +// Returns a concatenated string of the contained errors. 
+func (es MultiError) Error() string { + var buf bytes.Buffer + + if len(es) > 1 { + fmt.Fprintf(&buf, "%d errors: ", len(es)) + } + + for i, err := range es { + if i != 0 { + buf.WriteString("; ") + } + buf.WriteString(err.Error()) + } + + return buf.String() +} + +// Add adds the error to the error list if it is not nil. +func (es *MultiError) Add(err error) { + if err == nil { + return + } + if merr, ok := err.(MultiError); ok { + *es = append(*es, merr...) + } else { + *es = append(*es, err) + } +} + +// Err returns the error list as an error or nil if it is empty. +func (es MultiError) Err() error { + if len(es) == 0 { + return nil + } + return es +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go index a372878ac950d..9705e299cd2e5 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/azure/helpers.go @@ -19,6 +19,17 @@ const DirDelim = "/" var errorCodeRegex = regexp.MustCompile(`X-Ms-Error-Code:\D*\[(\w+)\]`) +func init() { + // Disable `ForceLog` in Azure storage module + // As the time of this patch, the logging function in the storage module isn't correctly + // detecting expected REST errors like 404 and so outputs them to syslog along with a stacktrace. + // https://github.com/Azure/azure-storage-blob-go/issues/214 + // + // This needs to be done at startup because the underlying variable is not thread safe. + // https://github.com/Azure/azure-pipeline-go/blob/dc95902f1d32034f8f743ccc6c3f2eb36b84da27/pipeline/core.go#L276-L283 + pipeline.SetForceLogEnabled(false) +} + func getContainerURL(ctx context.Context, conf Config) (blob.ContainerURL, error) { c, err := blob.NewSharedKeyCredential(conf.StorageAccountName, conf.StorageAccountKey) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/filesystem/filesystem.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/filesystem/filesystem.go index 3a42d0ccd37be..72c0c017efc59 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/filesystem/filesystem.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/filesystem/filesystem.go @@ -54,7 +54,7 @@ func NewBucket(rootDir string) (*Bucket, error) { // Iter calls f for each entry in the given directory. The argument to f is the full // object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) error { +func (b *Bucket) Iter(_ context.Context, dir string, f func(string) error) error { absDir := filepath.Join(b.rootDir, dir) info, err := os.Stat(absDir) if err != nil { @@ -108,7 +108,7 @@ func (r *rangeReaderCloser) Close() error { } // Attributes returns information about the specified object. 
-func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { +func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { file := filepath.Join(b.rootDir, name) stat, err := os.Stat(file) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go index cdab0925a6caa..cf5317d43d7e1 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go @@ -118,7 +118,7 @@ func TryToGetSize(r io.Reader) (int64, error) { case *strings.Reader: return f.Size(), nil } - return 0, errors.New("unsupported type of io.Reader") + return 0, errors.Errorf("unsupported type of io.Reader: %T", r) } // UploadDir uploads all files in srcdir to the bucket with into a top-level directory diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go index 923a42539c309..eb679679805b3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go @@ -59,16 +59,17 @@ var DefaultConfig = Config{ // Config stores the configuration for s3 bucket. type Config struct { - Bucket string `yaml:"bucket"` - Endpoint string `yaml:"endpoint"` - Region string `yaml:"region"` - AccessKey string `yaml:"access_key"` - Insecure bool `yaml:"insecure"` - SignatureV2 bool `yaml:"signature_version2"` - SecretKey string `yaml:"secret_key"` - PutUserMetadata map[string]string `yaml:"put_user_metadata"` - HTTPConfig HTTPConfig `yaml:"http_config"` - TraceConfig TraceConfig `yaml:"trace"` + Bucket string `yaml:"bucket"` + Endpoint string `yaml:"endpoint"` + Region string `yaml:"region"` + AccessKey string `yaml:"access_key"` + Insecure bool `yaml:"insecure"` + SignatureV2 bool `yaml:"signature_version2"` + SecretKey string `yaml:"secret_key"` + PutUserMetadata map[string]string `yaml:"put_user_metadata"` + HTTPConfig HTTPConfig `yaml:"http_config"` + TraceConfig TraceConfig `yaml:"trace"` + ListObjectsVersion string `yaml:"list_objects_version"` // PartSize used for multipart upload. Only used if uploaded object size is known and larger than configured PartSize. PartSize uint64 `yaml:"part_size"` SSEConfig SSEConfig `yaml:"sse_config"` @@ -137,6 +138,7 @@ type Bucket struct { sse encrypt.ServerSide putUserMetadata map[string]string partSize uint64 + listObjectsV1 bool } // parseConfig unmarshals a buffer into a Config with default HTTPConfig values. @@ -159,36 +161,54 @@ func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error return NewBucketWithConfig(logger, config, component) } +type overrideSignerType struct { + credentials.Provider + signerType credentials.SignatureType +} + +func (s *overrideSignerType) Retrieve() (credentials.Value, error) { + v, err := s.Provider.Retrieve() + if err != nil { + return v, err + } + if !v.SignerType.IsAnonymous() { + v.SignerType = s.signerType + } + return v, nil +} + // NewBucketWithConfig returns a new Bucket using the provided s3 config values. 
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { var chain []credentials.Provider + // TODO(bwplotka): Don't do flags as they won't scale, use actual params like v2, v4 instead + wrapCredentialsProvider := func(p credentials.Provider) credentials.Provider { return p } + if config.SignatureV2 { + wrapCredentialsProvider = func(p credentials.Provider) credentials.Provider { + return &overrideSignerType{Provider: p, signerType: credentials.SignatureV2} + } + } + if err := validate(config); err != nil { return nil, err } if config.AccessKey != "" { - signature := credentials.SignatureV4 - // TODO(bwplotka): Don't do flags, use actual v2, v4 params. - if config.SignatureV2 { - signature = credentials.SignatureV2 - } - - chain = []credentials.Provider{&credentials.Static{ + chain = []credentials.Provider{wrapCredentialsProvider(&credentials.Static{ Value: credentials.Value{ AccessKeyID: config.AccessKey, SecretAccessKey: config.SecretKey, - SignerType: signature, + SignerType: credentials.SignatureV4, }, - }} + })} } else { chain = []credentials.Provider{ - &credentials.EnvAWS{}, - &credentials.FileAWSCredentials{}, - &credentials.IAM{ + wrapCredentialsProvider(&credentials.EnvAWS{}), + wrapCredentialsProvider(&credentials.FileAWSCredentials{}), + wrapCredentialsProvider(&credentials.IAM{ Client: &http.Client{ Transport: http.DefaultTransport, }, - }, + }), } } @@ -246,6 +266,10 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B client.TraceOn(logWriter) } + if config.ListObjectsVersion != "" && config.ListObjectsVersion != "v1" && config.ListObjectsVersion != "v2" { + return nil, errors.Errorf("Initialize s3 client list objects version: Unsupported version %q was provided. Supported values are v1, v2", config.ListObjectsVersion) + } + bkt := &Bucket{ logger: logger, name: config.Bucket, @@ -253,6 +277,7 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B sse: sse, putUserMetadata: config.PutUserMetadata, partSize: config.PartSize, + listObjectsV1: config.ListObjectsVersion == "v1", } return bkt, nil } @@ -309,6 +334,7 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) err opts := minio.ListObjectsOptions{ Prefix: dir, Recursive: false, + UseV1: b.listObjectsV1, } for object := range b.client.ListObjects(ctx, b.name, opts) { diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index ab6c15c6f88a0..c20e5b162b262 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/url" @@ -97,14 +98,26 @@ func NewWithTracingClient(logger log.Logger, userAgent string) *Client { ) } -func (c *Client) get2xx(ctx context.Context, u *url.URL) (_ []byte, _ int, err error) { - req, err := http.NewRequest(http.MethodGet, u.String(), nil) +// req2xx sends a request to the given url.URL. If method is http.MethodPost then +// the raw query is encoded in the body and the appropriate Content-Type is set. 
+func (c *Client) req2xx(ctx context.Context, u *url.URL, method string) (_ []byte, _ int, err error) { + var b io.Reader + if method == http.MethodPost { + rq := u.RawQuery + b = strings.NewReader(rq) + u.RawQuery = "" + } + + req, err := http.NewRequest(method, u.String(), b) if err != nil { return nil, 0, errors.Wrap(err, "create GET request") } if c.userAgent != "" { req.Header.Set("User-Agent", c.userAgent) } + if method == http.MethodPost { + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } resp, err := c.Do(req.WithContext(ctx)) if err != nil { @@ -148,7 +161,7 @@ func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labe span, ctx := tracing.StartSpan(ctx, "/prom_config HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + body, _, err := c.req2xx(ctx, &u, http.MethodGet) if err != nil { return nil, err } @@ -339,6 +352,7 @@ func (c *Client) Snapshot(ctx context.Context, base *url.URL, skipHead bool) (st type QueryOptions struct { Deduplicate bool PartialResponseStrategy storepb.PartialResponseStrategy + Method string } func (p *QueryOptions) AddTo(values url.Values) error { @@ -381,7 +395,12 @@ func (c *Client) QueryInstant(ctx context.Context, base *url.URL, query string, span, ctx := tracing.StartSpan(ctx, "/prom_query_instant HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + method := opts.Method + if method == "" { + method = http.MethodGet + } + + body, _, err := c.req2xx(ctx, &u, method) if err != nil { return nil, nil, errors.Wrap(err, "read query instant response") } @@ -483,7 +502,7 @@ func (c *Client) QueryRange(ctx context.Context, base *url.URL, query string, st span, ctx := tracing.StartSpan(ctx, "/prom_query_range HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + body, _, err := c.req2xx(ctx, &u, http.MethodGet) if err != nil { return nil, nil, errors.Wrap(err, "read query range response") } @@ -565,7 +584,7 @@ func (c *Client) AlertmanagerAlerts(ctx context.Context, base *url.URL) ([]*mode span, ctx := tracing.StartSpan(ctx, "/alertmanager_alerts HTTP[client]") defer span.Finish() - body, _, err := c.get2xx(ctx, &u) + body, _, err := c.req2xx(ctx, &u, http.MethodGet) if err != nil { return nil, err } @@ -592,7 +611,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string span, ctx := tracing.StartSpan(ctx, spanName) defer span.Finish() - body, code, err := c.get2xx(ctx, u) + body, code, err := c.req2xx(ctx, u, http.MethodGet) if err != nil { if code, exists := statusToCode[code]; exists && code != 0 { return status.Error(code, err.Error()) diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 5b3e85940bfe6..3f817a62ee4af 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -59,7 +59,8 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" - tsdberrors "github.com/prometheus/prometheus/tsdb/errors" + + "github.com/thanos-io/thanos/pkg/errutil" ) // Repeat executes f every interval seconds until stopc is closed or f returns an error. @@ -136,7 +137,7 @@ func ExhaustCloseWithLogOnErr(logger log.Logger, r io.ReadCloser, format string, // CloseWithErrCapture runs function and on error return error by argument including the given error (usually // from caller function). 
func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...interface{}) { - merr := tsdberrors.MultiError{} + merr := errutil.MultiError{} merr.Add(*err) merr.Add(errors.Wrapf(closer.Close(), format, a...)) @@ -151,7 +152,7 @@ func ExhaustCloseWithErrCapture(err *error, r io.ReadCloser, format string, a .. CloseWithErrCapture(err, r, format, a...) // Prepend the io.Copy error. - merr := tsdberrors.MultiError{} + merr := errutil.MultiError{} merr.Add(copyErr) merr.Add(*err) diff --git a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go index 06b8391e3f59c..df66622774b07 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go +++ b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go @@ -360,7 +360,7 @@ func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error { } meta.Thanos.Source = s.source meta.Thanos.SegmentFiles = block.GetSegmentFiles(updir) - if err := metadata.Write(s.logger, updir, meta); err != nil { + if err := meta.WriteToDir(s.logger, updir); err != nil { return errors.Wrap(err, "write meta file") } return block.Upload(ctx, s.logger, s.bucket, updir) @@ -390,7 +390,7 @@ func (s *Shipper) blockMetasFromOldest() (metas []*metadata.Meta, _ error) { if !fi.IsDir() { continue } - m, err := metadata.Read(dir) + m, err := metadata.ReadFromDir(dir) if err != nil { return nil, errors.Wrapf(err, "read metadata for block %v", dir) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 2c162c4885c57..1d0195c529fe0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -40,6 +40,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/model" "github.com/thanos-io/thanos/pkg/objstore" @@ -246,14 +247,19 @@ type FilterConfig struct { // BucketStore implements the store API backed by a bucket. It loads all index // files to local disk. +// +// NOTE: Bucket store reencodes postings using diff+varint+snappy when storing to cache. +// This makes them smaller, but takes extra CPU and memory. +// When used with in-memory cache, memory usage should decrease overall, thanks to postings being smaller. type BucketStore struct { - logger log.Logger - metrics *bucketStoreMetrics - bkt objstore.InstrumentedBucketReader - fetcher block.MetadataFetcher - dir string - indexCache storecache.IndexCache - chunkPool pool.BytesPool + logger log.Logger + metrics *bucketStoreMetrics + bkt objstore.InstrumentedBucketReader + fetcher block.MetadataFetcher + dir string + indexCache storecache.IndexCache + indexReaderPool *indexheader.ReaderPool + chunkPool pool.BytesPool // Sets of blocks that have the same labels. They are indexed by a hash over their label set. mtx sync.RWMutex @@ -276,10 +282,6 @@ type BucketStore struct { advLabelSets []labelpb.ZLabelSet enableCompatibilityLabel bool - // Reencode postings using diff+varint+snappy when storing to cache. - // This makes them smaller, but takes extra CPU and memory. - // When used with in-memory cache, memory usage should decrease overall, thanks to postings being smaller. - enablePostingsCompression bool postingOffsetsInMemSampling int // Enables hints in the Series() response. 
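A recurring API change in the hunks above (streamed_block_writer.go and shipper.go) is the move from the package-level `metadata.Read`/`metadata.Write` helpers to `metadata.ReadFromDir` and the `Meta.WriteToDir` method. A minimal sketch of the new call pattern is below; it is illustrative only and not part of the patch, and the block directory path is a made-up placeholder.

```go
// Illustrative sketch: reading and re-writing a block's meta.json with the
// renamed helpers used by the shipper/downsample hunks above.
//   old: m, err := metadata.Read(dir);  metadata.Write(logger, dir, m)
//   new: m, err := metadata.ReadFromDir(dir);  m.WriteToDir(logger, dir)
package main

import (
	"os"

	kitlog "github.com/go-kit/kit/log"
	"github.com/thanos-io/thanos/pkg/block/metadata"
)

func main() {
	logger := kitlog.NewLogfmtLogger(os.Stderr)
	blockDir := "./data/01EXAMPLEBLOCKULID" // hypothetical block directory

	// ReadFromDir replaces the old package-level metadata.Read.
	m, err := metadata.ReadFromDir(blockDir)
	if err != nil {
		logger.Log("msg", "read block meta", "err", err)
		os.Exit(1)
	}

	// Writing is now a method on Meta instead of metadata.Write.
	if err := m.WriteToDir(logger, blockDir); err != nil {
		logger.Log("msg", "write block meta", "err", err)
		os.Exit(1)
	}
}
```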
@@ -302,9 +304,10 @@ func NewBucketStore( blockSyncConcurrency int, filterConfig *FilterConfig, enableCompatibilityLabel bool, - enablePostingsCompression bool, postingOffsetsInMemSampling int, enableSeriesResponseHints bool, // TODO(pracucci) Thanos 0.12 and below doesn't gracefully handle new fields in SeriesResponse. Drop this flag and always enable hints once we can drop backward compatibility. + lazyIndexReaderEnabled bool, + lazyIndexReaderIdleTimeout time.Duration, ) (*BucketStore, error) { if logger == nil { logger = log.NewNopLogger() @@ -321,6 +324,7 @@ func NewBucketStore( fetcher: fetcher, dir: dir, indexCache: indexCache, + indexReaderPool: indexheader.NewReaderPool(logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, extprom.WrapRegistererWithPrefix("thanos_bucket_store_", reg)), chunkPool: chunkPool, blocks: map[ulid.ULID]*bucketBlock{}, blockSets: map[uint64]*bucketBlockSet{}, @@ -331,7 +335,6 @@ func NewBucketStore( chunksLimiterFactory: chunksLimiterFactory, partitioner: gapBasedPartitioner{maxGapSize: partitionerMaxGapSize}, enableCompatibilityLabel: enableCompatibilityLabel, - enablePostingsCompression: enablePostingsCompression, postingOffsetsInMemSampling: postingOffsetsInMemSampling, enableSeriesResponseHints: enableSeriesResponseHints, metrics: newBucketStoreMetrics(reg), @@ -352,6 +355,8 @@ func (s *BucketStore) Close() (err error) { for _, b := range s.blocks { runutil.CloseWithErrCapture(&err, b, "closing Bucket Block") } + + s.indexReaderPool.Close() return err } @@ -484,7 +489,7 @@ func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err er lset := labels.FromMap(meta.Thanos.Labels) h := lset.Hash() - indexHeaderReader, err := indexheader.NewBinaryReader( + indexHeaderReader, err := s.indexReaderPool.NewBinaryReader( ctx, s.logger, s.bkt, @@ -512,7 +517,6 @@ func (s *BucketStore) addBlock(ctx context.Context, meta *metadata.Meta) (err er s.chunkPool, indexHeaderReader, s.partitioner, - s.enablePostingsCompression, ) if err != nil { return errors.Wrap(err, "new bucket block") @@ -704,53 +708,52 @@ func blockSeries( chks []chunks.Meta ) for _, id := range ps { - if err := indexr.LoadedSeries(id, &lset, &chks); err != nil { + if err := indexr.LoadedSeries(id, &lset, &chks, req); err != nil { return nil, nil, errors.Wrap(err, "read series") } - s := seriesEntry{ - lset: make(labels.Labels, 0, len(lset)+len(extLset)), - refs: make([]uint64, 0, len(chks)), - chks: make([]storepb.AggrChunk, 0, len(chks)), - } - for _, l := range lset { - // Skip if the external labels of the block overrule the series' label. - // NOTE(fabxc): maybe move it to a prefixed version to still ensure uniqueness of series? - if extLset[l.Name] != "" { - continue - } - s.lset = append(s.lset, l) - } - for ln, lv := range extLset { - s.lset = append(s.lset, labels.Label{Name: ln, Value: lv}) - } - sort.Sort(s.lset) + if len(chks) > 0 { + s := seriesEntry{lset: make(labels.Labels, 0, len(lset)+len(extLset))} + if !req.SkipChunks { + s.refs = make([]uint64, 0, len(chks)) + s.chks = make([]storepb.AggrChunk, 0, len(chks)) + for _, meta := range chks { + if err := chunkr.addPreload(meta.Ref); err != nil { + return nil, nil, errors.Wrap(err, "add chunk preload") + } + s.chks = append(s.chks, storepb.AggrChunk{ + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + }) + s.refs = append(s.refs, meta.Ref) + } - for _, meta := range chks { - if meta.MaxTime < req.MinTime { - continue - } - if meta.MinTime > req.MaxTime { - break + // Reserve chunksLimiter if we save chunks. 
+ if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { + return nil, nil, errors.Wrap(err, "exceeded chunks limit") + } } - if err := chunkr.addPreload(meta.Ref); err != nil { - return nil, nil, errors.Wrap(err, "add chunk preload") + for _, l := range lset { + // Skip if the external labels of the block overrule the series' label. + // NOTE(fabxc): maybe move it to a prefixed version to still ensure uniqueness of series? + if extLset[l.Name] != "" { + continue + } + s.lset = append(s.lset, l) } - s.chks = append(s.chks, storepb.AggrChunk{ - MinTime: meta.MinTime, - MaxTime: meta.MaxTime, - }) - s.refs = append(s.refs, meta.Ref) - } - if len(s.chks) > 0 { - if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded chunks limit") + for ln, lv := range extLset { + s.lset = append(s.lset, labels.Label{Name: ln, Value: lv}) } + sort.Sort(s.lset) res = append(res, s) } } + if req.SkipChunks { + return newBucketSeriesSet(res), indexr.stats, nil + } + // Preload all chunks that were marked in the previous stage. if err := chunkr.preload(); err != nil { return nil, nil, errors.Wrap(err, "preload chunks") @@ -920,13 +923,16 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie resHints.AddQueriedBlock(b.meta.ULID) } + var chunkr *bucketChunkReader // We must keep the readers open until all their data has been sent. indexr := b.indexReader(gctx) - chunkr := b.chunkReader(gctx) + if !req.SkipChunks { + chunkr = b.chunkReader(gctx) + defer runutil.CloseWithLogOnErr(s.logger, chunkr, "series block") + } // Defer all closes to the end of Series method. defer runutil.CloseWithLogOnErr(s.logger, indexr, "series block") - defer runutil.CloseWithLogOnErr(s.logger, chunkr, "series block") g.Go(func() error { part, pstats, err := blockSeries( @@ -1057,24 +1063,62 @@ func chunksSize(chks []storepb.AggrChunk) (size int) { // LabelNames implements the storepb.StoreServer interface. func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + resHints := &hintspb.LabelNamesResponseHints{} + g, gctx := errgroup.WithContext(ctx) s.mtx.RLock() var mtx sync.Mutex var sets [][]string + var reqBlockMatchers []*labels.Matcher + + if req.Hints != nil { + reqHints := &hintspb.LabelNamesRequestHints{} + err := types.UnmarshalAny(req.Hints, reqHints) + if err != nil { + return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label names request hints").Error()) + } + + reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + if err != nil { + return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) + } + } for _, b := range s.blocks { if !b.overlapsClosedInterval(req.Start, req.End) { continue } + if len(reqBlockMatchers) > 0 && !b.matchRelabelLabels(reqBlockMatchers) { + continue + } + + resHints.AddQueriedBlock(b.meta.ULID) + indexr := b.indexReader(gctx) + extLabels := b.meta.Thanos.Labels + g.Go(func() error { defer runutil.CloseWithLogOnErr(s.logger, indexr, "label names") // Do it via index reader to have pending reader registered correctly. - res := indexr.block.indexHeaderReader.LabelNames() + res, err := indexr.block.indexHeaderReader.LabelNames() + if err != nil { + return errors.Wrap(err, "label names") + } + + // Add a set for the external labels as well. + // We're not adding them directly to res because there could be duplicates. 
+ extRes := make([]string, 0, len(extLabels)) + for lName := range extLabels { + extRes = append(extRes, lName) + } + sort.Strings(res) + sort.Strings(extRes) + + res = strutil.MergeSlices(res, extRes) mtx.Lock() sets = append(sets, res) @@ -1089,25 +1133,56 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq if err := g.Wait(); err != nil { return nil, status.Error(codes.Internal, err.Error()) } + + anyHints, err := types.MarshalAny(resHints) + if err != nil { + return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label names response hints").Error()) + } + return &storepb.LabelNamesResponse{ Names: strutil.MergeSlices(sets...), + Hints: anyHints, }, nil } // LabelValues implements the storepb.StoreServer interface. func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + resHints := &hintspb.LabelValuesResponseHints{} + g, gctx := errgroup.WithContext(ctx) s.mtx.RLock() var mtx sync.Mutex var sets [][]string + var reqBlockMatchers []*labels.Matcher + + if req.Hints != nil { + reqHints := &hintspb.LabelValuesRequestHints{} + err := types.UnmarshalAny(req.Hints, reqHints) + if err != nil { + return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label values request hints").Error()) + } + + reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + if err != nil { + return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) + } + } for _, b := range s.blocks { if !b.overlapsClosedInterval(req.Start, req.End) { continue } + if len(reqBlockMatchers) > 0 && !b.matchRelabelLabels(reqBlockMatchers) { + continue + } + + resHints.AddQueriedBlock(b.meta.ULID) + indexr := b.indexReader(gctx) + extLabels := b.meta.Thanos.Labels + g.Go(func() error { defer runutil.CloseWithLogOnErr(s.logger, indexr, "label values") @@ -1117,6 +1192,11 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return errors.Wrap(err, "index header label values") } + // Add the external label value as well. + if extLabelValue, ok := extLabels[req.Label]; ok { + res = strutil.MergeSlices(res, []string{extLabelValue}) + } + mtx.Lock() sets = append(sets, res) mtx.Unlock() @@ -1130,8 +1210,15 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR if err := g.Wait(); err != nil { return nil, status.Error(codes.Aborted, err.Error()) } + + anyHints, err := types.MarshalAny(resHints) + if err != nil { + return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label values response hints").Error()) + } + return &storepb.LabelValuesResponse{ Values: strutil.MergeSlices(sets...), + Hints: anyHints, }, nil } @@ -1289,8 +1376,6 @@ type bucketBlock struct { partitioner partitioner - enablePostingsCompression bool - // Block's labels used by block-level matchers to filter blocks to query. These are used to select blocks using // request hints' BlockMatchers. 
relabelLabels labels.Labels @@ -1307,19 +1392,17 @@ func newBucketBlock( chunkPool pool.BytesPool, indexHeadReader indexheader.Reader, p partitioner, - enablePostingsCompression bool, ) (b *bucketBlock, err error) { b = &bucketBlock{ - logger: logger, - metrics: metrics, - bkt: bkt, - indexCache: indexCache, - chunkPool: chunkPool, - dir: dir, - partitioner: p, - meta: meta, - indexHeaderReader: indexHeadReader, - enablePostingsCompression: enablePostingsCompression, + logger: logger, + metrics: metrics, + bkt: bkt, + indexCache: indexCache, + chunkPool: chunkPool, + dir: dir, + partitioner: p, + meta: meta, + indexHeaderReader: indexHeadReader, } // Translate the block's labels and inject the block ID as a label @@ -1553,7 +1636,11 @@ func (r *bucketIndexReader) ExpandedPostings(ms []*labels.Matcher) ([]uint64, er // As of version two all series entries are 16 byte padded. All references // we get have to account for that to get the correct offset. - if r.block.indexHeaderReader.IndexVersion() >= 2 { + version, err := r.block.indexHeaderReader.IndexVersion() + if err != nil { + return nil, errors.Wrap(err, "get index version") + } + if version >= 2 { for i, id := range ps { ps[i] = id * 16 } @@ -1755,22 +1842,20 @@ func (r *bucketIndexReader) fetchPostings(keys []labels.Label) ([]index.Postings compressionTime := time.Duration(0) compressions, compressionErrors, compressedSize := 0, 0, 0 - if r.block.enablePostingsCompression { - // Reencode postings before storing to cache. If that fails, we store original bytes. - // This can only fail, if postings data was somehow corrupted, - // and there is nothing we can do about it. - // Errors from corrupted postings will be reported when postings are used. - compressions++ - s := time.Now() - bep := newBigEndianPostings(pBytes[4:]) - data, err := diffVarintSnappyEncode(bep, bep.length()) - compressionTime = time.Since(s) - if err == nil { - dataToCache = data - compressedSize = len(data) - } else { - compressionErrors = 1 - } + // Reencode postings before storing to cache. If that fails, we store original bytes. + // This can only fail, if postings data was somehow corrupted, + // and there is nothing we can do about it. + // Errors from corrupted postings will be reported when postings are used. + compressions++ + s := time.Now() + bep := newBigEndianPostings(pBytes[4:]) + data, err := diffVarintSnappyEncode(bep, bep.length()) + compressionTime = time.Since(s) + if err == nil { + dataToCache = data + compressedSize = len(data) + } else { + compressionErrors = 1 } r.mtx.Lock() @@ -1988,7 +2073,8 @@ func (g gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64 // LoadedSeries populates the given labels and chunk metas for the series identified // by the reference. // Returns ErrNotFound if the ref does not resolve to a known series. -func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta, + req *storepb.SeriesRequest) error { b, ok := r.loadedSeries[ref] if !ok { return errors.Errorf("series %d not found", ref) @@ -1997,7 +2083,7 @@ func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks * r.stats.seriesTouched++ r.stats.seriesTouchedSizeSum += len(b) - return r.dec.Series(b, lset, chks) + return r.decodeSeriesWithReq(b, lset, chks, req) } // Close released the underlying resources of the reader. 
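The LabelNames and LabelValues handlers in the bucket.go hunks above now accept per-request hints carrying block matchers and report which blocks were queried. A hedged client-side sketch of building such a request follows; it is not part of this patch, the `__block_id` label name is an assumption (the handlers only require that the matchers line up with each block's injected labels), and the field names come from the generated hintspb/storepb types shown elsewhere in this diff.

```go
// Hypothetical client-side sketch: constructing a LabelNamesRequest whose
// hints restrict the query to matching blocks, mirroring the
// UnmarshalAny / TranslateFromPromMatchers path added in the handler above.
package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
	"github.com/thanos-io/thanos/pkg/store/hintspb"
	"github.com/thanos-io/thanos/pkg/store/storepb"
)

func buildLabelNamesRequest(start, end int64) (*storepb.LabelNamesRequest, error) {
	hints := &hintspb.LabelNamesRequestHints{
		BlockMatchers: []storepb.LabelMatcher{
			// "__block_id" is assumed here as the injected block ID label.
			{Type: storepb.LabelMatcher_RE, Name: "__block_id", Value: "01E.*"},
		},
	}
	anyHints, err := types.MarshalAny(hints)
	if err != nil {
		return nil, fmt.Errorf("marshal request hints: %w", err)
	}
	return &storepb.LabelNamesRequest{Start: start, End: end, Hints: anyHints}, nil
}

func main() {
	req, err := buildLabelNamesRequest(0, 1000)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.String())
}
```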
@@ -2006,6 +2092,95 @@ func (r *bucketIndexReader) Close() error { return nil } +// decodeSeriesWithReq decodes a series entry from the given byte slice based on the SeriesRequest. +func (r *bucketIndexReader) decodeSeriesWithReq(b []byte, lbls *labels.Labels, chks *[]chunks.Meta, + req *storepb.SeriesRequest) error { + *lbls = (*lbls)[:0] + *chks = (*chks)[:0] + + d := encoding.Decbuf{B: b} + + k := d.Uvarint() + + for i := 0; i < k; i++ { + lno := uint32(d.Uvarint()) + lvo := uint32(d.Uvarint()) + + if d.Err() != nil { + return errors.Wrap(d.Err(), "read series label offsets") + } + + ln, err := r.dec.LookupSymbol(lno) + if err != nil { + return errors.Wrap(err, "lookup label name") + } + lv, err := r.dec.LookupSymbol(lvo) + if err != nil { + return errors.Wrap(err, "lookup label value") + } + + *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) + } + + // Read the chunks meta data. + k = d.Uvarint() + + if k == 0 { + return nil + } + + t0 := d.Varint64() + maxt := int64(d.Uvarint64()) + t0 + ref0 := int64(d.Uvarint64()) + + // No chunk in the required time range. + if t0 > req.MaxTime { + return nil + } + + if req.MinTime <= maxt { + *chks = append(*chks, chunks.Meta{ + Ref: uint64(ref0), + MinTime: t0, + MaxTime: maxt, + }) + // Get a valid chunk, return if it is a skip chunk request. + if req.SkipChunks { + return nil + } + } + t0 = maxt + + for i := 1; i < k; i++ { + mint := int64(d.Uvarint64()) + t0 + maxt := int64(d.Uvarint64()) + mint + ref0 += d.Varint64() + t0 = maxt + + if maxt < req.MinTime { + continue + } + if mint > req.MaxTime { + break + } + + if d.Err() != nil { + return errors.Wrapf(d.Err(), "read meta for chunk %d", i) + } + + *chks = append(*chks, chunks.Meta{ + Ref: uint64(ref0), + MinTime: mint, + MaxTime: maxt, + }) + + if req.SkipChunks { + return nil + } + } + return d.Err() +} + type bucketChunkReader struct { ctx context.Context block *bucketBlock diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go index 91c081a9fa664..8d6beca145e7e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/custom.go @@ -10,3 +10,15 @@ func (m *SeriesResponseHints) AddQueriedBlock(id ulid.ULID) { Id: id.String(), }) } + +func (m *LabelNamesResponseHints) AddQueriedBlock(id ulid.ULID) { + m.QueriedBlocks = append(m.QueriedBlocks, Block{ + Id: id.String(), + }) +} + +func (m *LabelValuesResponseHints) AddQueriedBlock(id ulid.ULID) { + m.QueriedBlocks = append(m.QueriedBlocks, Block{ + Id: id.String(), + }) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go index d46d89a8c9b8d..9f78dbbbb2964 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.pb.go @@ -140,16 +140,176 @@ func (m *Block) XXX_DiscardUnknown() { var xxx_messageInfo_Block proto.InternalMessageInfo +type LabelNamesRequestHints struct { + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. 
+ BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` +} + +func (m *LabelNamesRequestHints) Reset() { *m = LabelNamesRequestHints{} } +func (m *LabelNamesRequestHints) String() string { return proto.CompactTextString(m) } +func (*LabelNamesRequestHints) ProtoMessage() {} +func (*LabelNamesRequestHints) Descriptor() ([]byte, []int) { + return fileDescriptor_b82aa23c4c11e83f, []int{3} +} +func (m *LabelNamesRequestHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesRequestHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesRequestHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesRequestHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesRequestHints.Merge(m, src) +} +func (m *LabelNamesRequestHints) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesRequestHints) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesRequestHints.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesRequestHints proto.InternalMessageInfo + +type LabelNamesResponseHints struct { + /// queried_blocks is the list of blocks that have been queried. + QueriedBlocks []Block `protobuf:"bytes,1,rep,name=queried_blocks,json=queriedBlocks,proto3" json:"queried_blocks"` +} + +func (m *LabelNamesResponseHints) Reset() { *m = LabelNamesResponseHints{} } +func (m *LabelNamesResponseHints) String() string { return proto.CompactTextString(m) } +func (*LabelNamesResponseHints) ProtoMessage() {} +func (*LabelNamesResponseHints) Descriptor() ([]byte, []int) { + return fileDescriptor_b82aa23c4c11e83f, []int{4} +} +func (m *LabelNamesResponseHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesResponseHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesResponseHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesResponseHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesResponseHints.Merge(m, src) +} +func (m *LabelNamesResponseHints) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesResponseHints) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesResponseHints.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesResponseHints proto.InternalMessageInfo + +type LabelValuesRequestHints struct { + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. 
+ BlockMatchers []storepb.LabelMatcher `protobuf:"bytes,1,rep,name=block_matchers,json=blockMatchers,proto3" json:"block_matchers"` +} + +func (m *LabelValuesRequestHints) Reset() { *m = LabelValuesRequestHints{} } +func (m *LabelValuesRequestHints) String() string { return proto.CompactTextString(m) } +func (*LabelValuesRequestHints) ProtoMessage() {} +func (*LabelValuesRequestHints) Descriptor() ([]byte, []int) { + return fileDescriptor_b82aa23c4c11e83f, []int{5} +} +func (m *LabelValuesRequestHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesRequestHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesRequestHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesRequestHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesRequestHints.Merge(m, src) +} +func (m *LabelValuesRequestHints) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesRequestHints) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesRequestHints.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesRequestHints proto.InternalMessageInfo + +type LabelValuesResponseHints struct { + /// queried_blocks is the list of blocks that have been queried. + QueriedBlocks []Block `protobuf:"bytes,1,rep,name=queried_blocks,json=queriedBlocks,proto3" json:"queried_blocks"` +} + +func (m *LabelValuesResponseHints) Reset() { *m = LabelValuesResponseHints{} } +func (m *LabelValuesResponseHints) String() string { return proto.CompactTextString(m) } +func (*LabelValuesResponseHints) ProtoMessage() {} +func (*LabelValuesResponseHints) Descriptor() ([]byte, []int) { + return fileDescriptor_b82aa23c4c11e83f, []int{6} +} +func (m *LabelValuesResponseHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesResponseHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesResponseHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesResponseHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesResponseHints.Merge(m, src) +} +func (m *LabelValuesResponseHints) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesResponseHints) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesResponseHints.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesResponseHints proto.InternalMessageInfo + func init() { proto.RegisterType((*SeriesRequestHints)(nil), "hintspb.SeriesRequestHints") proto.RegisterType((*SeriesResponseHints)(nil), "hintspb.SeriesResponseHints") proto.RegisterType((*Block)(nil), "hintspb.Block") + proto.RegisterType((*LabelNamesRequestHints)(nil), "hintspb.LabelNamesRequestHints") + proto.RegisterType((*LabelNamesResponseHints)(nil), "hintspb.LabelNamesResponseHints") + proto.RegisterType((*LabelValuesRequestHints)(nil), "hintspb.LabelValuesRequestHints") + proto.RegisterType((*LabelValuesResponseHints)(nil), "hintspb.LabelValuesResponseHints") } func init() { proto.RegisterFile("store/hintspb/hints.proto", fileDescriptor_b82aa23c4c11e83f) } var fileDescriptor_b82aa23c4c11e83f = []byte{ - // 256 bytes of a gzipped FileDescriptorProto + // 295 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xe2, 0x92, 0x2c, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0xcf, 0xc8, 0xcc, 0x2b, 0x29, 0x2e, 0x48, 0x82, 0xd0, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xec, 0x50, 0x41, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0xb0, 0x98, 0x3e, 0x88, 0x05, @@ -162,10 +322,13 @@ var fileDescriptor_b82aa23c4c11e83f = []byte{ 0x17, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x42, 0x4c, 0xb6, 0xe6, 0xe2, 0x2b, 0x2c, 0x05, 0x89, 0xa7, 0xc4, 0x83, 0xd5, 0xc3, 0x4c, 0xe6, 0xd3, 0x83, 0x7a, 0x41, 0xcf, 0x09, 0x24, 0x0c, 0x33, 0x13, 0xaa, 0x16, 0x2c, 0x56, 0xac, 0x24, 0xce, 0xc5, 0x0a, 0x66, 0x09, 0xf1, 0x71, 0x31, 0x65, 0xa6, - 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, 0x65, 0xa6, 0x38, 0xa9, 0x9e, 0x78, 0x28, 0xc7, - 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, - 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xb0, 0xd0, 0x49, 0x62, 0x03, - 0xfb, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x89, 0x23, 0x13, 0x63, 0x4a, 0x01, 0x00, 0x00, + 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, 0x65, 0xa6, 0x28, 0x45, 0x73, 0x89, 0x81, 0x5d, + 0xe4, 0x97, 0x98, 0x4b, 0x7d, 0x9f, 0x84, 0x71, 0x89, 0x23, 0x1b, 0x4e, 0x35, 0xdf, 0xc4, 0x40, + 0xcd, 0x0d, 0x4b, 0xcc, 0x29, 0xa5, 0xbe, 0xab, 0xc3, 0xb9, 0x24, 0x50, 0x4c, 0xa7, 0x96, 0xb3, + 0x9d, 0x54, 0x4f, 0x3c, 0x94, 0x63, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, + 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, + 0x28, 0x58, 0x4a, 0x4c, 0x62, 0x03, 0xa7, 0x2f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x47, + 0x2f, 0x08, 0x1f, 0xb6, 0x02, 0x00, 0x00, } func (m *SeriesRequestHints) Marshal() (dAtA []byte, err error) { @@ -269,70 +432,537 @@ func (m *Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0xa } - return len(dAtA) - i, nil -} + return len(dAtA) - i, nil +} + +func (m *LabelNamesRequestHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesRequestHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesRequestHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for iNdEx := len(m.BlockMatchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockMatchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHints(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelNamesResponseHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesResponseHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesResponseHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for iNdEx := len(m.QueriedBlocks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.QueriedBlocks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHints(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m 
*LabelValuesRequestHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesRequestHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesRequestHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for iNdEx := len(m.BlockMatchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockMatchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHints(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesResponseHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesResponseHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesResponseHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for iNdEx := len(m.QueriedBlocks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.QueriedBlocks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHints(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintHints(dAtA []byte, offset int, v uint64) int { + offset -= sovHints(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SeriesRequestHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for _, e := range m.BlockMatchers { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func (m *SeriesResponseHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for _, e := range m.QueriedBlocks { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func (m *Block) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovHints(uint64(l)) + } + return n +} + +func (m *LabelNamesRequestHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for _, e := range m.BlockMatchers { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func (m *LabelNamesResponseHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for _, e := range m.QueriedBlocks { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func (m *LabelValuesRequestHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BlockMatchers) > 0 { + for _, e := range m.BlockMatchers { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func (m *LabelValuesResponseHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.QueriedBlocks) > 0 { + for _, e := range m.QueriedBlocks { + l = e.Size() + n += 1 + l + sovHints(uint64(l)) + } + } + return n +} + +func 
sovHints(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozHints(x uint64) (n int) { + return sovHints(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesRequestHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMatchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockMatchers = append(m.BlockMatchers, storepb.LabelMatcher{}) + if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHints(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesResponseHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueriedBlocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueriedBlocks = append(m.QueriedBlocks, Block{}) + if err := m.QueriedBlocks[len(m.QueriedBlocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHints(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func encodeVarintHints(dAtA []byte, offset int, v uint64) int { - offset -= sovHints(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + if iNdEx > l { + return io.ErrUnexpectedEOF } - dAtA[offset] = uint8(v) - return base + return nil } -func (m *SeriesRequestHints) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.BlockMatchers) > 0 { - for _, e := range m.BlockMatchers { - l = e.Size() - n += 1 + l + sovHints(uint64(l)) +func (m *Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - return n -} - -func (m *SeriesResponseHints) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.QueriedBlocks) > 0 { - for _, e := range m.QueriedBlocks { - l = e.Size() - n += 1 + l + sovHints(uint64(l)) + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHints(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - return n -} -func (m *Block) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovHints(uint64(l)) + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} - -func sovHints(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozHints(x uint64) (n int) { - return sovHints(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { +func (m *LabelNamesRequestHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -355,10 +985,10 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeriesRequestHints: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesRequestHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
SeriesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -419,7 +1049,7 @@ func (m *SeriesRequestHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { +func (m *LabelNamesResponseHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -442,10 +1072,10 @@ func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SeriesResponseHints: wiretype end group for non-group") + return fmt.Errorf("proto: LabelNamesResponseHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SeriesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelNamesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -506,7 +1136,7 @@ func (m *SeriesResponseHints) Unmarshal(dAtA []byte) error { } return nil } -func (m *Block) Unmarshal(dAtA []byte) error { +func (m *LabelValuesRequestHints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -529,17 +1159,17 @@ func (m *Block) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Block: wiretype end group for non-group") + return fmt.Errorf("proto: LabelValuesRequestHints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LabelValuesRequestHints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockMatchers", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowHints @@ -549,23 +1179,112 @@ func (m *Block) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthHints } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthHints } if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + m.BlockMatchers = append(m.BlockMatchers, storepb.LabelMatcher{}) + if err := m.BlockMatchers[len(m.BlockMatchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHints(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHints + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesResponseHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesResponseHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesResponseHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueriedBlocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHints + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHints + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHints + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueriedBlocks = append(m.QueriedBlocks, Block{}) + if err := m.QueriedBlocks[len(m.QueriedBlocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto index 26dd9c90022fc..f7cf68d3ffe14 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/hintspb/hints.proto @@ -34,3 +34,28 @@ message SeriesResponseHints { message Block { string id = 1; } + + +message LabelNamesRequestHints { + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. + repeated thanos.LabelMatcher block_matchers = 1 [(gogoproto.nullable) = false]; +} + +message LabelNamesResponseHints { + /// queried_blocks is the list of blocks that have been queried. + repeated Block queried_blocks = 1 [(gogoproto.nullable) = false]; +} + +message LabelValuesRequestHints { + /// block_matchers is a list of label matchers that are evaluated against each single block's + /// labels to filter which blocks get queried. If the list is empty, no per-block filtering + /// is applied. + repeated thanos.LabelMatcher block_matchers = 1 [(gogoproto.nullable) = false]; +} + +message LabelValuesResponseHints { + /// queried_blocks is the list of blocks that have been queried. 
+ repeated Block queried_blocks = 1 [(gogoproto.nullable) = false]; +} \ No newline at end of file diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go index 0301304ea78c8..902611e6587ed 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go @@ -15,14 +15,14 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" - "github.com/thanos-io/thanos/pkg/runutil" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/errutil" + "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/tracing" @@ -168,7 +168,7 @@ func (s *tenantSeriesSetServer) Delegate(closer io.Closer) { } func (s *tenantSeriesSetServer) Close() error { - var merr tsdb_errors.MultiError + var merr errutil.MultiError for _, c := range s.closers { merr.Add(c.Close()) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go index a93999b1f471d..3eec6a47205c0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go @@ -419,6 +419,10 @@ type LabelNamesRequest struct { PartialResponseStrategy PartialResponseStrategy `protobuf:"varint,2,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` Start int64 `protobuf:"varint,3,opt,name=start,proto3" json:"start,omitempty"` End int64 `protobuf:"varint,4,opt,name=end,proto3" json:"end,omitempty"` + // hints is an opaque data structure that can be used to carry additional information. + // The content of this field and whether it's supported depends on the + // implementation of a specific store. + Hints *types.Any `protobuf:"bytes,5,opt,name=hints,proto3" json:"hints,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } @@ -457,6 +461,10 @@ var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo type LabelNamesResponse struct { Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` Warnings []string `protobuf:"bytes,2,rep,name=warnings,proto3" json:"warnings,omitempty"` + /// hints is an opaque data structure that can be used to carry additional information from + /// the store. The content of this field and whether it's supported depends on the + /// implementation of a specific store. 
+ Hints *types.Any `protobuf:"bytes,3,opt,name=hints,proto3" json:"hints,omitempty"` } func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } @@ -499,6 +507,10 @@ type LabelValuesRequest struct { PartialResponseStrategy PartialResponseStrategy `protobuf:"varint,3,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` Start int64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` End int64 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` + // hints is an opaque data structure that can be used to carry additional information. + // The content of this field and whether it's supported depends on the + // implementation of a specific store. + Hints *types.Any `protobuf:"bytes,6,opt,name=hints,proto3" json:"hints,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } @@ -537,6 +549,10 @@ var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo type LabelValuesResponse struct { Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` Warnings []string `protobuf:"bytes,2,rep,name=warnings,proto3" json:"warnings,omitempty"` + /// hints is an opaque data structure that can be used to carry additional information from + /// the store. The content of this field and whether it's supported depends on the + /// implementation of a specific store. + Hints *types.Any `protobuf:"bytes,3,opt,name=hints,proto3" json:"hints,omitempty"` } func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } @@ -590,72 +606,73 @@ func init() { func init() { proto.RegisterFile("store/storepb/rpc.proto", fileDescriptor_a938d55a388af629) } var fileDescriptor_a938d55a388af629 = []byte{ - // 1031 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4b, 0x6f, 0x23, 0x45, - 0x10, 0xf6, 0x78, 0xfc, 0x2c, 0x6f, 0xc2, 0x6c, 0xc7, 0xc9, 0x4e, 0xbc, 0x92, 0x63, 0x59, 0x42, - 0xb2, 0xa2, 0xc5, 0x06, 0x83, 0x56, 0x02, 0xed, 0xc5, 0x4e, 0xbc, 0x24, 0x62, 0xe3, 0x40, 0x3b, - 0xde, 0xc0, 0x22, 0x64, 0x8d, 0x9d, 0xde, 0xf1, 0x28, 0xf3, 0x62, 0xba, 0x4d, 0xe2, 0x2b, 0xdc, - 0x11, 0xe2, 0x3f, 0x21, 0xe5, 0xb8, 0x07, 0x0e, 0x88, 0xc3, 0x0a, 0x92, 0x23, 0x7f, 0x02, 0xf5, - 0x63, 0x1c, 0x4f, 0xc8, 0xe6, 0x12, 0x2e, 0x56, 0x57, 0x7d, 0x55, 0xd5, 0x5f, 0x7f, 0xd5, 0xd5, - 0x1e, 0x78, 0x44, 0x59, 0x10, 0x91, 0x96, 0xf8, 0x0d, 0xc7, 0xad, 0x28, 0x9c, 0x34, 0xc3, 0x28, - 0x60, 0x01, 0xca, 0xb1, 0xa9, 0xe5, 0x07, 0xb4, 0xb2, 0x99, 0x0c, 0x60, 0xf3, 0x90, 0x50, 0x19, - 0x52, 0x29, 0xdb, 0x81, 0x1d, 0x88, 0x65, 0x8b, 0xaf, 0x94, 0xb7, 0x96, 0x4c, 0x08, 0xa3, 0xc0, - 0xbb, 0x91, 0xa7, 0x4a, 0xba, 0xd6, 0x98, 0xb8, 0x37, 0x21, 0x3b, 0x08, 0x6c, 0x97, 0xb4, 0x84, - 0x35, 0x9e, 0xbd, 0x6e, 0x59, 0xfe, 0x5c, 0x42, 0xf5, 0xf7, 0x60, 0xe5, 0x38, 0x72, 0x18, 0xc1, - 0x84, 0x86, 0x81, 0x4f, 0x49, 0xfd, 0x27, 0x0d, 0x1e, 0x28, 0xcf, 0xf7, 0x33, 0x42, 0x19, 0xea, - 0x00, 0x30, 0xc7, 0x23, 0x94, 0x44, 0x0e, 0xa1, 0xa6, 0x56, 0xd3, 0x1b, 0xa5, 0xf6, 0x63, 0x9e, - 0xed, 0x11, 0x36, 0x25, 0x33, 0x3a, 0x9a, 0x04, 0xe1, 0xbc, 0x79, 0xe4, 0x78, 0x64, 0x20, 0x42, - 0xba, 0x99, 0x8b, 0xb7, 0x5b, 0x29, 0xbc, 0x94, 0x84, 0x36, 0x20, 0xc7, 0x88, 0x6f, 0xf9, 0xcc, - 0x4c, 0xd7, 0xb4, 0x46, 0x11, 0x2b, 0x0b, 0x99, 0x90, 0x8f, 0x48, 0xe8, 0x3a, 0x13, 0xcb, 0xd4, - 0x6b, 0x5a, 0x43, 0xc7, 0xb1, 0x59, 0x5f, 0x81, 0xd2, 0xbe, 0xff, 0x3a, 0x50, 0x1c, 0xea, 0xbf, - 0xa6, 0xe1, 0x81, 0xb4, 0x25, 0x4b, 0x34, 0x81, 
0x9c, 0x38, 0x68, 0x4c, 0x68, 0xa5, 0x29, 0x85, - 0x6d, 0xbe, 0xe0, 0xde, 0xee, 0x33, 0x4e, 0xe1, 0xcf, 0xb7, 0x5b, 0x9f, 0xd8, 0x0e, 0x9b, 0xce, - 0xc6, 0xcd, 0x49, 0xe0, 0xb5, 0x64, 0xc0, 0x07, 0x4e, 0xa0, 0x56, 0xad, 0xf0, 0xd4, 0x6e, 0x25, - 0x34, 0x6b, 0xbe, 0x12, 0xd9, 0x58, 0x95, 0x46, 0x9b, 0x50, 0xf0, 0x1c, 0x7f, 0xc4, 0x0f, 0x22, - 0x88, 0xeb, 0x38, 0xef, 0x39, 0x3e, 0x3f, 0xa9, 0x80, 0xac, 0x73, 0x09, 0x29, 0xea, 0x9e, 0x75, - 0x2e, 0xa0, 0x16, 0x14, 0x45, 0xd5, 0xa3, 0x79, 0x48, 0xcc, 0x4c, 0x4d, 0x6b, 0xac, 0xb6, 0x1f, - 0xc6, 0xec, 0x06, 0x31, 0x80, 0xaf, 0x63, 0xd0, 0x53, 0x00, 0xb1, 0xe1, 0x88, 0x12, 0x46, 0xcd, - 0xac, 0x38, 0xcf, 0x22, 0x43, 0x52, 0x1a, 0x10, 0xa6, 0x64, 0x2d, 0xba, 0xca, 0xa6, 0xf5, 0xdf, - 0x74, 0x58, 0x91, 0x92, 0xc7, 0xad, 0x5a, 0x26, 0xac, 0xbd, 0x9b, 0x70, 0x3a, 0x49, 0xf8, 0x29, - 0x87, 0xd8, 0x64, 0x4a, 0x22, 0x6a, 0xea, 0x62, 0xf7, 0x72, 0x42, 0xcd, 0x03, 0x09, 0x2a, 0x02, - 0x8b, 0x58, 0xd4, 0x86, 0x75, 0x5e, 0x32, 0x22, 0x34, 0x70, 0x67, 0xcc, 0x09, 0xfc, 0xd1, 0x99, - 0xe3, 0x9f, 0x04, 0x67, 0xe2, 0xd0, 0x3a, 0x5e, 0xf3, 0xac, 0x73, 0xbc, 0xc0, 0x8e, 0x05, 0x84, - 0x9e, 0x00, 0x58, 0xb6, 0x1d, 0x11, 0xdb, 0x62, 0x44, 0x9e, 0x75, 0xb5, 0xfd, 0x20, 0xde, 0xad, - 0x63, 0xdb, 0x11, 0x5e, 0xc2, 0xd1, 0x67, 0xb0, 0x19, 0x5a, 0x11, 0x73, 0x2c, 0x97, 0xef, 0x22, - 0x3a, 0x3f, 0x3a, 0x71, 0xa8, 0x35, 0x76, 0xc9, 0x89, 0x99, 0xab, 0x69, 0x8d, 0x02, 0x7e, 0xa4, - 0x02, 0xe2, 0x9b, 0xb1, 0xab, 0x60, 0xf4, 0xed, 0x2d, 0xb9, 0x94, 0x45, 0x16, 0x23, 0xf6, 0xdc, - 0xcc, 0x8b, 0xb6, 0x6c, 0xc5, 0x1b, 0x7f, 0x99, 0xac, 0x31, 0x50, 0x61, 0xff, 0x29, 0x1e, 0x03, - 0x68, 0x0b, 0x4a, 0xf4, 0xd4, 0x09, 0x47, 0x93, 0xe9, 0xcc, 0x3f, 0xa5, 0x66, 0x41, 0x50, 0x01, - 0xee, 0xda, 0x11, 0x1e, 0xb4, 0x0d, 0xd9, 0xa9, 0xe3, 0x33, 0x6a, 0x16, 0x6b, 0x9a, 0x10, 0x54, - 0x4e, 0x60, 0x33, 0x9e, 0xc0, 0x66, 0xc7, 0x9f, 0x63, 0x19, 0x52, 0xff, 0x59, 0x83, 0xd5, 0xb8, - 0x8f, 0xea, 0x7a, 0x37, 0x20, 0xb7, 0x98, 0x37, 0x9e, 0xbf, 0xba, 0xb8, 0x40, 0xc2, 0xbb, 0x97, - 0xc2, 0x0a, 0x47, 0x15, 0xc8, 0x9f, 0x59, 0x91, 0xef, 0xf8, 0xb6, 0x9c, 0xad, 0xbd, 0x14, 0x8e, - 0x1d, 0xe8, 0x49, 0x4c, 0x42, 0x7f, 0x37, 0x89, 0xbd, 0x94, 0xa2, 0xd1, 0x2d, 0x40, 0x2e, 0x22, - 0x74, 0xe6, 0xb2, 0xfa, 0xef, 0x1a, 0x3c, 0x14, 0x9d, 0xef, 0x5b, 0xde, 0xf5, 0xe5, 0xba, 0xb3, - 0x19, 0xda, 0x3d, 0x9a, 0x91, 0xbe, 0x67, 0x33, 0xca, 0x90, 0xa5, 0xcc, 0x8a, 0x98, 0x1a, 0x44, - 0x69, 0x20, 0x03, 0x74, 0xe2, 0x9f, 0xa8, 0xbb, 0xc8, 0x97, 0xf5, 0xe7, 0x80, 0x96, 0x4f, 0xa5, - 0xa4, 0x2e, 0x43, 0xd6, 0xe7, 0x0e, 0xf1, 0x90, 0x14, 0xb1, 0x34, 0x50, 0x05, 0x0a, 0x4a, 0x45, - 0x6a, 0xa6, 0x05, 0xb0, 0xb0, 0xeb, 0xff, 0x68, 0xaa, 0xd0, 0x4b, 0xcb, 0x9d, 0x5d, 0xeb, 0x53, - 0x86, 0xac, 0x98, 0x4d, 0xa1, 0x45, 0x11, 0x4b, 0xe3, 0x6e, 0xd5, 0xd2, 0xf7, 0x50, 0x4d, 0xff, - 0xbf, 0x54, 0xcb, 0xdc, 0xa2, 0x5a, 0xf6, 0x5a, 0xb5, 0x7d, 0x58, 0x4b, 0x1c, 0x56, 0xc9, 0xb6, - 0x01, 0xb9, 0x1f, 0x84, 0x47, 0xe9, 0xa6, 0xac, 0xbb, 0x84, 0xdb, 0xfe, 0x0e, 0x8a, 0x8b, 0x07, - 0x10, 0x95, 0x20, 0x3f, 0xec, 0x7f, 0xd1, 0x3f, 0x3c, 0xee, 0x1b, 0x29, 0x54, 0x84, 0xec, 0x57, - 0xc3, 0x1e, 0xfe, 0xc6, 0xd0, 0x50, 0x01, 0x32, 0x78, 0xf8, 0xa2, 0x67, 0xa4, 0x79, 0xc4, 0x60, - 0x7f, 0xb7, 0xb7, 0xd3, 0xc1, 0x86, 0xce, 0x23, 0x06, 0x47, 0x87, 0xb8, 0x67, 0x64, 0xb8, 0x1f, - 0xf7, 0x76, 0x7a, 0xfb, 0x2f, 0x7b, 0x46, 0x96, 0xfb, 0x77, 0x7b, 0xdd, 0xe1, 0xe7, 0x46, 0x6e, - 0xbb, 0x0b, 0x19, 0xfe, 0x82, 0xa0, 0x3c, 0xe8, 0xb8, 0x73, 0x2c, 0xab, 0xee, 0x1c, 0x0e, 0xfb, - 0x47, 0x86, 0xc6, 0x7d, 0x83, 0xe1, 0x81, 0x91, 0xe6, 0x8b, 0x83, 0xfd, 
0xbe, 0xa1, 0x8b, 0x45, - 0xe7, 0x6b, 0x59, 0x4e, 0x44, 0xf5, 0xb0, 0x91, 0x6d, 0xff, 0x98, 0x86, 0xac, 0xe0, 0x88, 0x3e, - 0x82, 0x0c, 0xff, 0xc7, 0x41, 0x6b, 0xb1, 0xc2, 0x4b, 0xff, 0x47, 0x95, 0x72, 0xd2, 0xa9, 0x34, - 0xf9, 0x14, 0x72, 0x72, 0x3e, 0xd1, 0x7a, 0x72, 0x5e, 0xe3, 0xb4, 0x8d, 0x9b, 0x6e, 0x99, 0xf8, - 0xa1, 0x86, 0x76, 0x00, 0xae, 0xef, 0x26, 0xda, 0x4c, 0xbc, 0xbf, 0xcb, 0x53, 0x58, 0xa9, 0xdc, - 0x06, 0xa9, 0xfd, 0x9f, 0x43, 0x69, 0xa9, 0x55, 0x28, 0x19, 0x9a, 0xb8, 0xac, 0x95, 0xc7, 0xb7, - 0x62, 0xb2, 0x4e, 0xbb, 0x0f, 0xab, 0xe2, 0x0b, 0x80, 0xdf, 0x42, 0x29, 0xc6, 0x33, 0x28, 0x61, - 0xe2, 0x05, 0x8c, 0x08, 0x3f, 0x5a, 0x1c, 0x7f, 0xf9, 0x43, 0xa1, 0xb2, 0x7e, 0xc3, 0xab, 0x3e, - 0x28, 0x52, 0xdd, 0xf7, 0x2f, 0xfe, 0xae, 0xa6, 0x2e, 0x2e, 0xab, 0xda, 0x9b, 0xcb, 0xaa, 0xf6, - 0xd7, 0x65, 0x55, 0xfb, 0xe5, 0xaa, 0x9a, 0x7a, 0x73, 0x55, 0x4d, 0xfd, 0x71, 0x55, 0x4d, 0xbd, - 0xca, 0xab, 0x6f, 0x9a, 0x71, 0x4e, 0xbc, 0x4b, 0x1f, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xd2, - 0x17, 0x25, 0x76, 0x3d, 0x09, 0x00, 0x00, + // 1045 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0x7a, 0xfd, 0xf9, 0x9c, 0x84, 0xed, 0xc4, 0x49, 0x37, 0xae, 0xe4, 0x58, 0x96, 0x90, + 0xac, 0xa8, 0xd8, 0x60, 0x50, 0x25, 0x50, 0x2f, 0x76, 0x62, 0x48, 0x44, 0xe3, 0xc0, 0x38, 0x6e, + 0xa0, 0x08, 0x59, 0x6b, 0x67, 0xba, 0x5e, 0xc5, 0xfb, 0xc1, 0xce, 0x98, 0xc4, 0x37, 0x04, 0x77, + 0x84, 0xb8, 0xf0, 0x17, 0x21, 0xe5, 0xd8, 0x23, 0xe2, 0x50, 0x41, 0xf2, 0x8f, 0xa0, 0xf9, 0x58, + 0xdb, 0x1b, 0xd2, 0xe6, 0x90, 0x5e, 0xac, 0x79, 0xef, 0xf7, 0x3e, 0x7e, 0xf3, 0x7e, 0x33, 0xe3, + 0x85, 0x87, 0x94, 0xf9, 0x21, 0x69, 0x88, 0xdf, 0x60, 0xd8, 0x08, 0x83, 0x51, 0x3d, 0x08, 0x7d, + 0xe6, 0xa3, 0x0c, 0x1b, 0x5b, 0x9e, 0x4f, 0x4b, 0x5b, 0xf1, 0x00, 0x36, 0x0b, 0x08, 0x95, 0x21, + 0xa5, 0xa2, 0xed, 0xdb, 0xbe, 0x58, 0x36, 0xf8, 0x4a, 0x79, 0x2b, 0xf1, 0x84, 0x20, 0xf4, 0xdd, + 0x1b, 0x79, 0xaa, 0xe4, 0xc4, 0x1a, 0x92, 0xc9, 0x4d, 0xc8, 0xf6, 0x7d, 0x7b, 0x42, 0x1a, 0xc2, + 0x1a, 0x4e, 0x5f, 0x36, 0x2c, 0x6f, 0x26, 0xa1, 0xea, 0x7b, 0xb0, 0x7a, 0x12, 0x3a, 0x8c, 0x60, + 0x42, 0x03, 0xdf, 0xa3, 0xa4, 0xfa, 0x8b, 0x06, 0x2b, 0xca, 0xf3, 0xc3, 0x94, 0x50, 0x86, 0x5a, + 0x00, 0xcc, 0x71, 0x09, 0x25, 0xa1, 0x43, 0xa8, 0xa9, 0x55, 0xf4, 0x5a, 0xa1, 0xf9, 0x88, 0x67, + 0xbb, 0x84, 0x8d, 0xc9, 0x94, 0x0e, 0x46, 0x7e, 0x30, 0xab, 0x1f, 0x3b, 0x2e, 0xe9, 0x89, 0x90, + 0x76, 0xea, 0xf2, 0xf5, 0x76, 0x02, 0x2f, 0x25, 0xa1, 0x4d, 0xc8, 0x30, 0xe2, 0x59, 0x1e, 0x33, + 0x93, 0x15, 0xad, 0x96, 0xc7, 0xca, 0x42, 0x26, 0x64, 0x43, 0x12, 0x4c, 0x9c, 0x91, 0x65, 0xea, + 0x15, 0xad, 0xa6, 0xe3, 0xc8, 0xac, 0xae, 0x42, 0xe1, 0xc0, 0x7b, 0xe9, 0x2b, 0x0e, 0xd5, 0xdf, + 0x93, 0xb0, 0x22, 0x6d, 0xc9, 0x12, 0x8d, 0x20, 0x23, 0x36, 0x1a, 0x11, 0x5a, 0xad, 0xcb, 0xc1, + 0xd6, 0x9f, 0x71, 0x6f, 0xfb, 0x29, 0xa7, 0xf0, 0xf7, 0xeb, 0xed, 0x4f, 0x6c, 0x87, 0x8d, 0xa7, + 0xc3, 0xfa, 0xc8, 0x77, 0x1b, 0x32, 0xe0, 0x03, 0xc7, 0x57, 0xab, 0x46, 0x70, 0x66, 0x37, 0x62, + 0x33, 0xab, 0xbf, 0x10, 0xd9, 0x58, 0x95, 0x46, 0x5b, 0x90, 0x73, 0x1d, 0x6f, 0xc0, 0x37, 0x22, + 0x88, 0xeb, 0x38, 0xeb, 0x3a, 0x1e, 0xdf, 0xa9, 0x80, 0xac, 0x0b, 0x09, 0x29, 0xea, 0xae, 0x75, + 0x21, 0xa0, 0x06, 0xe4, 0x45, 0xd5, 0xe3, 0x59, 0x40, 0xcc, 0x54, 0x45, 0xab, 0xad, 0x35, 0x1f, + 0x44, 0xec, 0x7a, 0x11, 0x80, 0x17, 0x31, 0xe8, 0x09, 0x80, 0x68, 0x38, 0xa0, 0x84, 0x51, 0x33, + 0x2d, 0xf6, 0x33, 0xcf, 0x90, 0x94, 0x7a, 0x84, 0xa9, 0xb1, 0xe6, 0x27, 0xca, 0xa6, 0xd5, 0x3f, + 0x75, 
0x58, 0x95, 0x23, 0x8f, 0xa4, 0x5a, 0x26, 0xac, 0xbd, 0x99, 0x70, 0x32, 0x4e, 0xf8, 0x09, + 0x87, 0xd8, 0x68, 0x4c, 0x42, 0x6a, 0xea, 0xa2, 0x7b, 0x31, 0x36, 0xcd, 0x43, 0x09, 0x2a, 0x02, + 0xf3, 0x58, 0xd4, 0x84, 0x0d, 0x5e, 0x32, 0x24, 0xd4, 0x9f, 0x4c, 0x99, 0xe3, 0x7b, 0x83, 0x73, + 0xc7, 0x3b, 0xf5, 0xcf, 0xc5, 0xa6, 0x75, 0xbc, 0xee, 0x5a, 0x17, 0x78, 0x8e, 0x9d, 0x08, 0x08, + 0x3d, 0x06, 0xb0, 0x6c, 0x3b, 0x24, 0xb6, 0xc5, 0x88, 0xdc, 0xeb, 0x5a, 0x73, 0x25, 0xea, 0xd6, + 0xb2, 0xed, 0x10, 0x2f, 0xe1, 0xe8, 0x33, 0xd8, 0x0a, 0xac, 0x90, 0x39, 0xd6, 0x84, 0x77, 0x11, + 0xca, 0x0f, 0x4e, 0x1d, 0x6a, 0x0d, 0x27, 0xe4, 0xd4, 0xcc, 0x54, 0xb4, 0x5a, 0x0e, 0x3f, 0x54, + 0x01, 0xd1, 0xc9, 0xd8, 0x53, 0x30, 0xfa, 0xee, 0x96, 0x5c, 0xca, 0x42, 0x8b, 0x11, 0x7b, 0x66, + 0x66, 0x85, 0x2c, 0xdb, 0x51, 0xe3, 0xaf, 0xe2, 0x35, 0x7a, 0x2a, 0xec, 0x7f, 0xc5, 0x23, 0x00, + 0x6d, 0x43, 0x81, 0x9e, 0x39, 0xc1, 0x60, 0x34, 0x9e, 0x7a, 0x67, 0xd4, 0xcc, 0x09, 0x2a, 0xc0, + 0x5d, 0xbb, 0xc2, 0x83, 0x76, 0x20, 0x3d, 0x76, 0x3c, 0x46, 0xcd, 0x7c, 0x45, 0x13, 0x03, 0x95, + 0x37, 0xb0, 0x1e, 0xdd, 0xc0, 0x7a, 0xcb, 0x9b, 0x61, 0x19, 0x52, 0xfd, 0x55, 0x83, 0xb5, 0x48, + 0x47, 0x75, 0xbc, 0x6b, 0x90, 0x99, 0xdf, 0x37, 0x9e, 0xbf, 0x36, 0x3f, 0x40, 0xc2, 0xbb, 0x9f, + 0xc0, 0x0a, 0x47, 0x25, 0xc8, 0x9e, 0x5b, 0xa1, 0xe7, 0x78, 0xb6, 0xbc, 0x5b, 0xfb, 0x09, 0x1c, + 0x39, 0xd0, 0xe3, 0x88, 0x84, 0xfe, 0x66, 0x12, 0xfb, 0x09, 0x45, 0xa3, 0x9d, 0x83, 0x4c, 0x48, + 0xe8, 0x74, 0xc2, 0xaa, 0x3f, 0x25, 0xe1, 0x81, 0x50, 0xbe, 0x6b, 0xb9, 0x8b, 0xc3, 0xf5, 0x56, + 0x31, 0xb4, 0x7b, 0x88, 0x91, 0xbc, 0xa7, 0x18, 0x45, 0x48, 0x53, 0x66, 0x85, 0x4c, 0x5d, 0x44, + 0x69, 0x20, 0x03, 0x74, 0xe2, 0x9d, 0xaa, 0xb3, 0xc8, 0x97, 0x0b, 0x4d, 0xd2, 0x77, 0x6b, 0x12, + 0x02, 0x5a, 0x9e, 0x80, 0x92, 0xa5, 0x08, 0x69, 0x8f, 0x3b, 0xc4, 0xa3, 0x93, 0xc7, 0xd2, 0x40, + 0x25, 0xc8, 0xa9, 0x89, 0x53, 0x33, 0x29, 0x80, 0xb9, 0xbd, 0xe8, 0xa9, 0xdf, 0xdd, 0xf3, 0x8f, + 0xa4, 0x6a, 0xfa, 0xdc, 0x9a, 0x4c, 0x17, 0x73, 0x2f, 0x42, 0x5a, 0xdc, 0x79, 0x31, 0xe3, 0x3c, + 0x96, 0xc6, 0xdb, 0xd5, 0x48, 0xde, 0x43, 0x0d, 0xfd, 0x5d, 0xa9, 0x91, 0xba, 0x45, 0x8d, 0xf4, + 0x2d, 0x6a, 0x64, 0xee, 0x9e, 0xcc, 0x14, 0xd6, 0x63, 0x83, 0x51, 0x72, 0x6c, 0x42, 0xe6, 0x47, + 0xe1, 0x51, 0x7a, 0x28, 0xeb, 0x5d, 0x09, 0xb2, 0xf3, 0x3d, 0xe4, 0xe7, 0x0f, 0x36, 0x2a, 0x40, + 0xb6, 0xdf, 0xfd, 0xb2, 0x7b, 0x74, 0xd2, 0x35, 0x12, 0x28, 0x0f, 0xe9, 0xaf, 0xfb, 0x1d, 0xfc, + 0xad, 0xa1, 0xa1, 0x1c, 0xa4, 0x70, 0xff, 0x59, 0xc7, 0x48, 0xf2, 0x88, 0xde, 0xc1, 0x5e, 0x67, + 0xb7, 0x85, 0x0d, 0x9d, 0x47, 0xf4, 0x8e, 0x8f, 0x70, 0xc7, 0x48, 0x71, 0x3f, 0xee, 0xec, 0x76, + 0x0e, 0x9e, 0x77, 0x8c, 0x34, 0xf7, 0xef, 0x75, 0xda, 0xfd, 0x2f, 0x8c, 0xcc, 0x4e, 0x1b, 0x52, + 0xfc, 0xc5, 0x43, 0x59, 0xd0, 0x71, 0xeb, 0x44, 0x56, 0xdd, 0x3d, 0xea, 0x77, 0x8f, 0x0d, 0x8d, + 0xfb, 0x7a, 0xfd, 0x43, 0x23, 0xc9, 0x17, 0x87, 0x07, 0x5d, 0x43, 0x17, 0x8b, 0xd6, 0x37, 0xb2, + 0x9c, 0x88, 0xea, 0x60, 0x23, 0xdd, 0xfc, 0x39, 0x09, 0x69, 0xc1, 0x11, 0x7d, 0x04, 0x29, 0xfe, + 0x0f, 0x89, 0xd6, 0x23, 0xe5, 0x96, 0xfe, 0x3f, 0x4b, 0xc5, 0xb8, 0x53, 0xcd, 0xef, 0x53, 0xc8, + 0xc8, 0xf7, 0x04, 0x6d, 0xc4, 0xdf, 0x97, 0x28, 0x6d, 0xf3, 0xa6, 0x5b, 0x26, 0x7e, 0xa8, 0xa1, + 0x5d, 0x80, 0xc5, 0xfd, 0x40, 0x5b, 0xb1, 0xff, 0x8b, 0xe5, 0x57, 0xa3, 0x54, 0xba, 0x0d, 0x52, + 0xfd, 0x3f, 0x87, 0xc2, 0x92, 0xac, 0x28, 0x1e, 0x1a, 0xbb, 0x04, 0xa5, 0x47, 0xb7, 0x62, 0xb2, + 0x4e, 0xb3, 0x0b, 0x6b, 0xe2, 0x8b, 0x85, 0x9f, 0x6e, 0x39, 0x8c, 0xa7, 0x50, 0xc0, 0xc4, 0xf5, + 0x19, 0x11, 0x7e, 0x34, 0xdf, 
0xfe, 0xf2, 0x87, 0x4d, 0x69, 0xe3, 0x86, 0x57, 0x7d, 0x00, 0x25, + 0xda, 0xef, 0x5f, 0xfe, 0x5b, 0x4e, 0x5c, 0x5e, 0x95, 0xb5, 0x57, 0x57, 0x65, 0xed, 0x9f, 0xab, + 0xb2, 0xf6, 0xdb, 0x75, 0x39, 0xf1, 0xea, 0xba, 0x9c, 0xf8, 0xeb, 0xba, 0x9c, 0x78, 0x91, 0x55, + 0xdf, 0x60, 0xc3, 0x8c, 0x38, 0x33, 0x1f, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x7f, 0x4f, + 0x30, 0xed, 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1352,6 +1369,18 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.End != 0 { i = encodeVarintRpc(dAtA, i, uint64(m.End)) i-- @@ -1400,6 +1429,18 @@ func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if len(m.Warnings) > 0 { for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Warnings[iNdEx]) @@ -1441,6 +1482,18 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if m.End != 0 { i = encodeVarintRpc(dAtA, i, uint64(m.End)) i-- @@ -1496,6 +1549,18 @@ func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if len(m.Warnings) > 0 { for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Warnings[iNdEx]) @@ -1706,6 +1771,10 @@ func (m *LabelNamesRequest) Size() (n int) { if m.End != 0 { n += 1 + sovRpc(uint64(m.End)) } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -1727,6 +1796,10 @@ func (m *LabelNamesResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -1752,6 +1825,10 @@ func (m *LabelValuesRequest) Size() (n int) { if m.End != 0 { n += 1 + sovRpc(uint64(m.End)) } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -1773,6 +1850,10 @@ func (m *LabelValuesResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -2773,6 +2854,42 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -2890,6 +3007,42 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { } m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -3052,6 +3205,42 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -3169,6 +3358,42 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { } m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &types.Any{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto index 2418d30c336c8..f1d2f181b7b16 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto @@ -145,11 +145,21 @@ message LabelNamesRequest { int64 start = 3; int64 end = 4; + + // hints is an opaque data structure that can be used to carry additional information. 
+ // The content of this field and whether it's supported depends on the + // implementation of a specific store. + google.protobuf.Any hints = 5; } message LabelNamesResponse { repeated string names = 1; repeated string warnings = 2; + + /// hints is an opaque data structure that can be used to carry additional information from + /// the store. The content of this field and whether it's supported depends on the + /// implementation of a specific store. + google.protobuf.Any hints = 3; } message LabelValuesRequest { @@ -163,9 +173,19 @@ message LabelValuesRequest { int64 start = 4; int64 end = 5; + + // hints is an opaque data structure that can be used to carry additional information. + // The content of this field and whether it's supported depends on the + // implementation of a specific store. + google.protobuf.Any hints = 6; } message LabelValuesResponse { repeated string values = 1; repeated string warnings = 2; + + /// hints is an opaque data structure that can be used to carry additional information from + /// the store. The content of this field and whether it's supported depends on the + /// implementation of a specific store. + google.protobuf.Any hints = 3; } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/testutil/series.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/testutil/series.go deleted file mode 100644 index 213b6335bf9bb..0000000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/testutil/series.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package storetestutil - -import ( - "context" - "fmt" - "math" - "math/rand" - "os" - "path/filepath" - "runtime" - "sort" - "testing" - - "github.com/gogo/protobuf/types" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb" - "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/index" - "github.com/prometheus/prometheus/tsdb/wal" - "github.com/thanos-io/thanos/pkg/store/hintspb" - "github.com/thanos-io/thanos/pkg/store/labelpb" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/testutil" -) - -const ( - // LabelLongSuffix is a label with ~50B in size, to emulate real-world high cardinality. - LabelLongSuffix = "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd" -) - -func allPostings(t testing.TB, ix tsdb.IndexReader) index.Postings { - k, v := index.AllPostingsKey() - p, err := ix.Postings(k, v) - testutil.Ok(t, err) - return p -} - -const RemoteReadFrameLimit = 1048576 - -type HeadGenOptions struct { - TSDBDir string - SamplesPerSeries, Series int - - WithWAL bool - PrependLabels labels.Labels - SkipChunks bool // Skips chunks in returned slice (not in generated head!). - - Random *rand.Rand -} - -// CreateHeadWithSeries returns head filled with given samples and same series returned in separate list for assertion purposes. -// Returned series list has "ext1"="1" prepended. Each series looks as follows: -// {foo=bar,i=000001aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd} where number indicate sample number from 0. -// Returned series are frame in same way as remote read would frame them. 
-func CreateHeadWithSeries(t testing.TB, j int, opts HeadGenOptions) (*tsdb.Head, []*storepb.Series) { - if opts.SamplesPerSeries < 1 || opts.Series < 1 { - t.Fatal("samples and series has to be 1 or more") - } - - fmt.Printf("Creating %d %d-sample series in %s\n", opts.Series, opts.SamplesPerSeries, opts.TSDBDir) - - var w *wal.WAL - var err error - if opts.WithWAL { - w, err = wal.New(nil, nil, filepath.Join(opts.TSDBDir, "wal"), true) - testutil.Ok(t, err) - } else { - testutil.Ok(t, os.MkdirAll(filepath.Join(opts.TSDBDir, "wal"), os.ModePerm)) - } - - h, err := tsdb.NewHead(nil, nil, w, tsdb.DefaultBlockDuration, opts.TSDBDir, nil, tsdb.DefaultStripeSize, nil) - testutil.Ok(t, err) - - app := h.Appender(context.Background()) - for i := 0; i < opts.Series; i++ { - ts := int64(j*opts.Series*opts.SamplesPerSeries + i*opts.SamplesPerSeries) - ref, err := app.Add(labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%07d%s", ts, LabelLongSuffix)), ts, opts.Random.Float64()) - testutil.Ok(t, err) - - for is := 1; is < opts.SamplesPerSeries; is++ { - testutil.Ok(t, app.AddFast(ref, ts+int64(is), opts.Random.Float64())) - } - } - testutil.Ok(t, app.Commit()) - - // Use TSDB and get all series for assertion. - chks, err := h.Chunks() - testutil.Ok(t, err) - defer func() { testutil.Ok(t, chks.Close()) }() - - ir, err := h.Index() - testutil.Ok(t, err) - defer func() { testutil.Ok(t, ir.Close()) }() - - var ( - lset labels.Labels - chunkMetas []chunks.Meta - expected = make([]*storepb.Series, 0, opts.Series) - ) - - all := allPostings(t, ir) - for all.Next() { - testutil.Ok(t, ir.Series(all.At(), &lset, &chunkMetas)) - expected = append(expected, &storepb.Series{Labels: labelpb.ZLabelsFromPromLabels(append(opts.PrependLabels.Copy(), lset...))}) - - if opts.SkipChunks { - continue - } - - for _, c := range chunkMetas { - chEnc, err := chks.Chunk(c.Ref) - testutil.Ok(t, err) - - // Open Chunk. - if c.MaxTime == math.MaxInt64 { - c.MaxTime = c.MinTime + int64(chEnc.NumSamples()) - 1 - } - - expected[len(expected)-1].Chunks = append(expected[len(expected)-1].Chunks, storepb.AggrChunk{ - MinTime: c.MinTime, - MaxTime: c.MaxTime, - Raw: &storepb.Chunk{Type: storepb.Chunk_XOR, Data: chEnc.Bytes()}, - }) - } - } - testutil.Ok(t, all.Err()) - return h, expected -} - -// SeriesServer is test gRPC storeAPI series server. -type SeriesServer struct { - // This field just exist to pseudo-implement the unused methods of the interface. - storepb.Store_SeriesServer - - ctx context.Context - - SeriesSet []*storepb.Series - Warnings []string - HintsSet []*types.Any - - Size int64 -} - -func NewSeriesServer(ctx context.Context) *SeriesServer { - return &SeriesServer{ctx: ctx} -} - -func (s *SeriesServer) Send(r *storepb.SeriesResponse) error { - s.Size += int64(r.Size()) - - if r.GetWarning() != "" { - s.Warnings = append(s.Warnings, r.GetWarning()) - return nil - } - - if r.GetSeries() != nil { - s.SeriesSet = append(s.SeriesSet, r.GetSeries()) - return nil - } - - if r.GetHints() != nil { - s.HintsSet = append(s.HintsSet, r.GetHints()) - return nil - } - // Unsupported field, skip. 
- return nil -} - -func (s *SeriesServer) Context() context.Context { - return s.ctx -} - -func RunSeriesInterestingCases(t testutil.TB, maxSamples, maxSeries int, f func(t testutil.TB, samplesPerSeries, series int)) { - for _, tc := range []struct { - samplesPerSeries int - series int - }{ - { - samplesPerSeries: 1, - series: maxSeries, - }, - { - samplesPerSeries: maxSamples / (maxSeries / 10), - series: maxSeries / 10, - }, - { - samplesPerSeries: maxSamples, - series: 1, - }, - } { - if ok := t.Run(fmt.Sprintf("%dSeriesWith%dSamples", tc.series, tc.samplesPerSeries), func(t testutil.TB) { - f(t, tc.samplesPerSeries, tc.series) - }); !ok { - return - } - runtime.GC() - } -} - -// SeriesCase represents single test/benchmark case for testing storepb series. -type SeriesCase struct { - Name string - Req *storepb.SeriesRequest - - // Exact expectations are checked only for tests. For benchmarks only length is assured. - ExpectedSeries []*storepb.Series - ExpectedWarnings []string - ExpectedHints []hintspb.SeriesResponseHints -} - -// TestServerSeries runs tests against given cases. -func TestServerSeries(t testutil.TB, store storepb.StoreServer, cases ...*SeriesCase) { - for _, c := range cases { - t.Run(c.Name, func(t testutil.TB) { - t.ResetTimer() - for i := 0; i < t.N(); i++ { - srv := NewSeriesServer(context.Background()) - testutil.Ok(t, store.Series(c.Req, srv)) - testutil.Equals(t, len(c.ExpectedWarnings), len(srv.Warnings), "%v", srv.Warnings) - testutil.Equals(t, len(c.ExpectedSeries), len(srv.SeriesSet)) - testutil.Equals(t, len(c.ExpectedHints), len(srv.HintsSet)) - - if !t.IsBenchmark() { - if len(c.ExpectedSeries) == 1 { - // For bucketStoreAPI chunks are not sorted within response. TODO: Investigate: Is this fine? - sort.Slice(srv.SeriesSet[0].Chunks, func(i, j int) bool { - return srv.SeriesSet[0].Chunks[i].MinTime < srv.SeriesSet[0].Chunks[j].MinTime - }) - } - - // Huge responses can produce unreadable diffs - make it more human readable. 
- if len(c.ExpectedSeries) > 4 { - for j := range c.ExpectedSeries { - testutil.Equals(t, c.ExpectedSeries[j].Labels, srv.SeriesSet[j].Labels, "%v series chunks mismatch", j) - if len(c.ExpectedSeries[j].Chunks) > 20 { - testutil.Equals(t, len(c.ExpectedSeries[j].Chunks), len(srv.SeriesSet[j].Chunks), "%v series chunks number mismatch", j) - } - testutil.Equals(t, c.ExpectedSeries[j].Chunks, srv.SeriesSet[j].Chunks, "%v series chunks mismatch", j) - } - } else { - testutil.Equals(t, c.ExpectedSeries, srv.SeriesSet) - } - - var actualHints []hintspb.SeriesResponseHints - for _, anyHints := range srv.HintsSet { - hints := hintspb.SeriesResponseHints{} - testutil.Ok(t, types.UnmarshalAny(anyHints, &hints)) - actualHints = append(actualHints, hints) - } - testutil.Equals(t, c.ExpectedHints, actualHints) - } - } - }) - } -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index a700b48eb22f5..c9f4421e7135a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -14,7 +14,6 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/store/labelpb" - storetestutil "github.com/thanos-io/thanos/pkg/store/storepb/testutil" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -24,6 +23,8 @@ import ( "github.com/thanos-io/thanos/pkg/store/storepb" ) +const RemoteReadFrameLimit = 1048576 + type TSDBReader interface { storage.ChunkQueryable StartTime() (int64, error) @@ -62,7 +63,7 @@ func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db TSDBReader, com db: db, component: component, externalLabels: externalLabels, - maxBytesPerFrame: storetestutil.RemoteReadFrameLimit, + maxBytesPerFrame: RemoteReadFrameLimit, } } diff --git a/vendor/github.com/weaveworks/common/middleware/http_tracing.go b/vendor/github.com/weaveworks/common/middleware/http_tracing.go index a1ac40f52b0be..dfcafa8ebabc8 100644 --- a/vendor/github.com/weaveworks/common/middleware/http_tracing.go +++ b/vendor/github.com/weaveworks/common/middleware/http_tracing.go @@ -54,3 +54,18 @@ func ExtractTraceID(ctx context.Context) (string, bool) { return sctx.TraceID().String(), true } + +// ExtractSampledTraceID works like ExtractTraceID but the returned bool is only +// true if the returned trace id is sampled. 
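The ExtractSampledTraceID helper added below, together with the instrument.go hunk that follows it, records the trace ID of sampled requests as an exemplar on the request-duration histogram; the /metrics change further down enables OpenMetrics output so those exemplars are actually exposed. A minimal sketch of the same pattern, assuming a plain client_golang HistogramVec (the metric, route and function names here are illustrative, not part of the patch):

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/weaveworks/common/middleware"
)

var requestSeconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{Name: "request_duration_seconds"},
	[]string{"route"},
)

// observe records a request duration and, when the request's trace is sampled,
// attaches the trace ID as an exemplar next to the histogram observation.
func observe(r *http.Request, route string, seconds float64) {
	obs := requestSeconds.WithLabelValues(route)
	if traceID, ok := middleware.ExtractSampledTraceID(r.Context()); ok {
		// Observers obtained from a HistogramVec also implement ExemplarObserver.
		obs.(prometheus.ExemplarObserver).ObserveWithExemplar(
			seconds, prometheus.Labels{"traceID": traceID},
		)
		return
	}
	obs.Observe(seconds)
}

This mirrors what the Instrument middleware now does internally once the hunks below are applied.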
+func ExtractSampledTraceID(ctx context.Context) (string, bool) { + sp := opentracing.SpanFromContext(ctx) + if sp == nil { + return "", false + } + sctx, ok := sp.Context().(jaeger.SpanContext) + if !ok { + return "", false + } + + return sctx.TraceID().String(), sctx.IsSampled() +} diff --git a/vendor/github.com/weaveworks/common/middleware/instrument.go b/vendor/github.com/weaveworks/common/middleware/instrument.go index 06165251b3ddb..75ddbb85d8fc0 100644 --- a/vendor/github.com/weaveworks/common/middleware/instrument.go +++ b/vendor/github.com/weaveworks/common/middleware/instrument.go @@ -68,9 +68,20 @@ func (i Instrument) Wrap(next http.Handler) http.Handler { next.ServeHTTP(ww, r) }) - i.Duration.WithLabelValues(r.Method, route, strconv.Itoa(respMetrics.Code), isWS).Observe(respMetrics.Duration.Seconds()) i.RequestBodySize.WithLabelValues(r.Method, route).Observe(float64(rBody.read)) i.ResponseBodySize.WithLabelValues(r.Method, route).Observe(float64(respMetrics.Written)) + + histogram := i.Duration.WithLabelValues(r.Method, route, strconv.Itoa(respMetrics.Code), isWS) + if traceID, ok := ExtractSampledTraceID(r.Context()); ok { + // Need to type-convert the Observer to an + // ExemplarObserver. This will always work for a + // HistogramVec. + histogram.(prometheus.ExemplarObserver).ObserveWithExemplar( + respMetrics.Duration.Seconds(), prometheus.Labels{"traceID": traceID}, + ) + return + } + histogram.Observe(respMetrics.Duration.Seconds()) }) } diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go index a4bedb80a91da..0c2985f526d75 100644 --- a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/weaveworks/common/server/server.go @@ -69,14 +69,16 @@ type Config struct { Router *mux.Router `yaml:"-"` DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` - GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` - GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` - GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` - GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` - GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` - GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` - GRPCServerTime time.Duration `yaml:"grpc_server_keepalive_time"` - GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"` + GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` + GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` + GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` + GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` + GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` + GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` + GRPCServerTime time.Duration `yaml:"grpc_server_keepalive_time"` + GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"` + GRPCServerMinTimeBetweenPings time.Duration `yaml:"grpc_server_min_time_between_pings"` + GRPCServerPingWithoutStreamAllowed bool `yaml:"grpc_server_ping_without_stream_allowed"` LogFormat logging.Format `yaml:"log_format"` LogLevel logging.Level `yaml:"log_level"` @@ -122,6 +124,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, 
"server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity") f.DurationVar(&cfg.GRPCServerTime, "server.grpc.keepalive.time", time.Hour*2, "Duration after which a keepalive probe is sent in case of no activity over the connection., Default: 2h") f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s") + f.DurationVar(&cfg.GRPCServerMinTimeBetweenPings, "server.grpc.keepalive.min-time-between-pings", 5*time.Minute, "Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection.") + f.BoolVar(&cfg.GRPCServerPingWithoutStreamAllowed, "server.grpc.keepalive.ping-without-stream-allowed", false, "If true, server allows keepalive pings even when there are no active streams(RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection.") f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)") cfg.LogFormat.RegisterFlags(f) cfg.LogLevel.RegisterFlags(f) @@ -251,6 +255,11 @@ func New(cfg Config) (*Server, error) { Timeout: cfg.GRPCServerTimeout, } + grpcKeepAliveEnforcementPolicy := keepalive.EnforcementPolicy{ + MinTime: cfg.GRPCServerMinTimeBetweenPings, + PermitWithoutStream: cfg.GRPCServerPingWithoutStreamAllowed, + } + grpcOptions := []grpc.ServerOption{ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( grpcMiddleware..., @@ -259,6 +268,7 @@ func New(cfg Config) (*Server, error) { grpcStreamMiddleware..., )), grpc.KeepaliveParams(grpcKeepAliveOptions), + grpc.KeepaliveEnforcementPolicy(grpcKeepAliveEnforcementPolicy), grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize), grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize), grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)), @@ -349,7 +359,9 @@ func New(cfg Config) (*Server, error) { // RegisterInstrumentation on the given router. func RegisterInstrumentation(router *mux.Router) { - router.Handle("/metrics", promhttp.Handler()) + router.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{ + EnableOpenMetrics: true, + })) router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index 9eed911ac735e..8219748d0f6b4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -78,6 +78,9 @@ func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, case bsontype.Null: val.Set(reflect.Zero(val.Type())) return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a []byte", vrType) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go new file mode 100644 index 0000000000000..cb8180f25cccb --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go @@ -0,0 +1,63 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" +) + +// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. +type condAddrEncoder struct { + canAddrEnc ValueEncoder + elseEnc ValueEncoder +} + +var _ ValueEncoder = (*condAddrEncoder)(nil) + +// newCondAddrEncoder returns an condAddrEncoder. +func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { + encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return &encoder +} + +// EncodeValue is the ValueEncoderFunc for a value that may be addressable. +func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.CanAddr() { + return cae.canAddrEnc.EncodeValue(ec, vw, val) + } + if cae.elseEnc != nil { + return cae.elseEnc.EncodeValue(ec, vw, val) + } + return ErrNoEncoder{Type: val.Type()} +} + +// condAddrDecoder is the decoder used when a pointer to the value has a decoder. +type condAddrDecoder struct { + canAddrDec ValueDecoder + elseDec ValueDecoder +} + +var _ ValueDecoder = (*condAddrDecoder)(nil) + +// newCondAddrDecoder returns an CondAddrDecoder. +func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { + decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} + return &decoder +} + +// DecodeValue is the ValueDecoderFunc for a value that may be addressable. +func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if val.CanAddr() { + return cad.canAddrDec.DecodeValue(dc, vr, val) + } + if cad.elseDec != nil { + return cad.elseDec.DecodeValue(dc, vr, val) + } + return ErrNoDecoder{Type: val.Type()} +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 52d2365a34513..a2e2d425a0f7d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -24,6 +24,16 @@ import ( var defaultValueDecoders DefaultValueDecoders +func newDefaultStructCodec() *StructCodec { + codec, err := NewStructCodec(DefaultStructTagParser) + if err != nil { + // This function is called from the codec registration path, so errors can't be propagated. If there's an error + // constructing the StructCodec, we panic to avoid losing it. + panic(fmt.Errorf("error creating default StructCodec: %v", err)) + } + return codec +} + // DefaultValueDecoders is a namespace type for the default ValueDecoders used // when creating a registry. type DefaultValueDecoders struct{} @@ -77,7 +87,7 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { RegisterDefaultDecoder(reflect.Map, defaultMapCodec). RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, defaultStructCodec). + RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). RegisterTypeMapEntry(bsontype.Double, tFloat64). RegisterTypeMapEntry(bsontype.String, tString). 
@@ -140,6 +150,10 @@ func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into a boolean", vrType) } @@ -195,6 +209,10 @@ func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.Value if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into an integer type", vrType) } @@ -230,8 +248,8 @@ func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.Value } // UintDecodeValue is the ValueDecoderFunc for uint types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use UIntCodec.DecodeValue instead. +// +// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { var i64 int64 var err error @@ -354,6 +372,10 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) } @@ -373,8 +395,8 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val } // StringDecodeValue is the ValueDecoderFunc for string types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use StringCodec.DecodeValue instead. +// +// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { var str string var err error @@ -409,6 +431,8 @@ func (DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw. js, err = vr.ReadJavascript() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) } @@ -452,6 +476,10 @@ func (DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.Valu if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) } @@ -474,6 +502,8 @@ func (DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueR data, subtype, err = vr.ReadBinary() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a Binary", vrType) } @@ -536,6 +566,10 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw. 
if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into an ObjectID", vrType) } @@ -557,6 +591,8 @@ func (DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.Valu dt, err = vr.ReadDateTime() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a DateTime", vrType) } @@ -574,12 +610,21 @@ func (DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueRea return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} } - if vrType := vr.Type(); vrType != bsontype.Null { - return fmt.Errorf("cannot decode %v into a Null", vrType) + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a Null", vr.Type()) } + if err != nil { + return err + } val.Set(reflect.ValueOf(primitive.Null{})) - return vr.ReadNull() + return nil } // RegexDecodeValue is the ValueDecoderFunc for Regex. @@ -595,6 +640,8 @@ func (DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueRe pattern, options, err = vr.ReadRegex() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a Regex", vrType) } @@ -620,6 +667,8 @@ func (DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.Val ns, pointer, err = vr.ReadDBPointer() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a DBPointer", vrType) } @@ -644,6 +693,8 @@ func (DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.Val t, incr, err = vr.ReadTimestamp() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a Timestamp", vrType) } @@ -667,6 +718,8 @@ func (DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueR err = vr.ReadMinKey() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) } @@ -690,6 +743,8 @@ func (DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueR err = vr.ReadMaxKey() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) } @@ -714,6 +769,8 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso d128, err = vr.ReadDecimal128() case bsontype.Null: err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) } @@ -755,6 +812,11 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr return err } val.SetString("") + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + val.SetString("") default: return fmt.Errorf("cannot decode %v into a json.Number", vrType) } @@ -787,14 +849,20 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value } val.Set(reflect.ValueOf(url.URL{})) return nil + case bsontype.Undefined: + if err := 
vr.ReadUndefined(); err != nil { + return err + } + val.Set(reflect.ValueOf(url.URL{})) + return nil default: return fmt.Errorf("cannot decode %v into a *url.URL", vrType) } } // TimeDecodeValue is the ValueDecoderFunc for time.Time. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use Time.DecodeValue instead. +// +// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.DateTime { return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) @@ -814,8 +882,8 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu } // ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use ByteSliceCodec.DecodeValue instead. +// +// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) @@ -843,8 +911,8 @@ func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw } // MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use Map.DecodeValue instead. +// +// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} @@ -935,6 +1003,9 @@ func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.Val case bsontype.Null: val.Set(reflect.Zero(val.Type())) return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into an array", vrType) } @@ -964,8 +1035,8 @@ func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.Val } // SliceDecodeValue is the ValueDecoderFunc for slice types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use SliceCodec.DecodeValue instead. +// +// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Kind() != reflect.Slice { return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} @@ -1075,8 +1146,8 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson } // EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use EmptyInterfaceCodec.DecodeValue instead. +// +// Deprecated: EmptyInterfaceDecodeValue is not registered by default. 
Use EmptyInterfaceCodec.DecodeValue instead. func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tEmpty { return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} @@ -1146,6 +1217,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR return nil, err } + idx := 0 for { vr, err := ar.ReadValue() if err == bsonrw.ErrEOA { @@ -1159,9 +1231,10 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR err = decoder.DecodeValue(dc, vr, elem) if err != nil { - return nil, err + return nil, newDecodeError(strconv.Itoa(idx), err) } elems = append(elems, elem) + idx++ } return elems, nil @@ -1200,6 +1273,12 @@ func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bs } val.Set(reflect.ValueOf(primitive.CodeWithScope{})) return nil + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + val.Set(reflect.ValueOf(primitive.CodeWithScope{})) + return nil default: return fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) } @@ -1239,7 +1318,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b val := reflect.New(tEmpty).Elem() err = decoder.DecodeValue(dc, vr, val) if err != nil { - return nil, err + return nil, newDecodeError(key, err) } elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 08078b304d270..01ddbbb672156 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -104,7 +104,7 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { RegisterDefaultEncoder(reflect.Map, defaultMapCodec). RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, defaultStructCodec). + RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). @@ -150,8 +150,8 @@ func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.Value } // UintEncodeValue is the ValueEncoderFunc for uint types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use UIntCodec.EncodeValue instead. +// +// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Uint8, reflect.Uint16: @@ -185,8 +185,8 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val } // StringEncodeValue is the ValueEncoderFunc for string types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use StringCodec.EncodeValue instead. +// +// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. 
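The recurring case bsontype.Undefined branches added to these decoders make BSON undefined behave like null: the target is reset to its zero value instead of the decode failing. A small sketch of the effect under the default registry (the struct and helper names are illustrative):

package example

import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// decodeUndefined round-trips a document whose "name" field is BSON undefined.
// Previously the string decoder rejected undefined; with these changes the
// field is simply left at its zero value, the same way null is handled.
func decodeUndefined() (string, error) {
	raw, err := bson.Marshal(bson.M{"name": primitive.Undefined{}})
	if err != nil {
		return "", err
	}

	var out struct {
		Name string `bson:"name"`
	}
	if err := bson.Unmarshal(raw, &out); err != nil {
		return "", err
	}
	return out.Name, nil // "" rather than a "cannot decode undefined" error
}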
func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ @@ -245,19 +245,20 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value } // TimeEncodeValue is the ValueEncoderFunc for time.TIme. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use TimeCodec.EncodeValue instead. +// +// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } tt := val.Interface().(time.Time) - return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) + dt := primitive.NewDateTimeFromTime(tt) + return vw.WriteDateTime(int64(dt)) } // ByteSliceEncodeValue is the ValueEncoderFunc for []byte. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use ByteSliceCodec.EncodeValue instead. +// +// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} @@ -269,8 +270,8 @@ func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw } // MapEncodeValue is the ValueEncoderFunc for map[string]* types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use MapCodec.EncodeValue instead. +// +// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead. func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} @@ -419,8 +420,8 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val } // SliceEncodeValue is the ValueEncoderFunc for slice types. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use SliceCodec.EncodeValue instead. +// +// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Slice { return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} @@ -501,8 +502,8 @@ func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncod } // EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// This method is deprecated and does not have any stability guarantees. It may be removed in the -// future. Use EmptyInterfaceCodec.EncodeValue instead. +// +// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. 
func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tEmpty { return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 85ae9c6a19e97..d641960c10209 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -20,12 +20,29 @@ var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool + DecodeZerosMap bool + EncodeNilAsEmpty bool + EncodeKeysWithStringer bool } var _ ValueCodec = &MapCodec{} +// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. +// This applies to types used as map keys and is similar to encoding.TextMarshaler. +type KeyMarshaler interface { + MarshalKey() (key string, err error) +} + +// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation +// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. +// +// UnmarshalKey must be able to decode the form generated by MarshalKey. +// UnmarshalKey must copy the text if it wishes to retain the text +// after returning. +type KeyUnmarshaler interface { + UnmarshalKey(key string) error +} + // NewMapCodec returns a MapCodec with options opts. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) @@ -37,6 +54,9 @@ func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { if mapOpt.EncodeNilAsEmpty != nil { codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty } + if mapOpt.EncodeKeysWithStringer != nil { + codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer + } return &codec } @@ -79,7 +99,11 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v keys := val.MapKeys() for _, key := range keys { - keyStr := fmt.Sprint(key) + keyStr, err := mc.encodeKey(key) + if err != nil { + return err + } + if collisionFn != nil && collisionFn(keyStr) { return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) } @@ -129,6 +153,9 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref case bsontype.Null: val.Set(reflect.Zero(val.Type())) return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() default: return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) } @@ -157,7 +184,6 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref } keyType := val.Type().Key() - keyKind := keyType.Kind() for { key, vr, err := dr.ReadElement() @@ -168,29 +194,15 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref return err } - k := reflect.ValueOf(key) - if keyType != tString { - switch keyKind { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - parsed, err := strconv.ParseFloat(k.String(), 64) - if err != nil { - return fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyKind, err) - } - k = 
reflect.ValueOf(parsed) - case reflect.String: // if keyType wraps string - default: - return fmt.Errorf("BSON map must have string or decimal keys. Got:%v", val.Type()) - } - - k = k.Convert(keyType) + k, err := mc.decodeKey(key, keyType) + if err != nil { + return err } elem := reflect.New(eType).Elem() err = decoder.DecodeValue(dc, vr, elem) if err != nil { - return err + return newDecodeError(key, err) } val.SetMapIndex(k, elem) @@ -204,3 +216,82 @@ func clearMap(m reflect.Value) { m.SetMapIndex(k, none) } } + +func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { + if mc.EncodeKeysWithStringer { + return fmt.Sprint(val), nil + } + + // keys of any string type are used directly + if val.Kind() == reflect.String { + return val.String(), nil + } + // KeyMarshalers are marshaled + if km, ok := val.Interface().(KeyMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + buf, err := km.MarshalKey() + if err == nil { + return buf, nil + } + return "", err + } + + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil + } + return "", fmt.Errorf("unsupported key type: %v", val.Type()) +} + +var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() + +func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { + keyVal := reflect.ValueOf(key) + var err error + switch { + // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler + case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(KeyUnmarshaler) + err = v.UnmarshalKey(key) + keyVal = keyVal.Elem() + // Otherwise, go to type specific behavior + default: + switch keyType.Kind() { + case reflect.String: + keyVal = reflect.ValueOf(key).Convert(keyType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, parseErr := strconv.ParseInt(s, 10, 64) + if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { + err = fmt.Errorf("failed to unmarshal number key %v", s) + } + keyVal = reflect.ValueOf(n).Convert(keyType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, parseErr := strconv.ParseUint(s, 10, 64) + if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { + err = fmt.Errorf("failed to unmarshal number key %v", s) + break + } + keyVal = reflect.ValueOf(n).Convert(keyType) + case reflect.Float32, reflect.Float64: + if mc.EncodeKeysWithStringer { + parsed, err := strconv.ParseFloat(key, 64) + if err != nil { + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + } + keyVal = reflect.ValueOf(parsed) + break + } + fallthrough + default: + return keyVal, fmt.Errorf("unsupported key type: %v", keyType) + } + } + return keyVal, err +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index 0d9502f214a57..616a3e701b753 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -14,11 +14,6 @@ import ( 
"go.mongodb.org/mongo-driver/bson/bsontype" ) -var defaultPointerCodec = &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), -} - var _ ValueEncoder = &PointerCodec{} var _ ValueDecoder = &PointerCodec{} @@ -83,6 +78,10 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val val.Set(reflect.Zero(val.Type())) return vr.ReadNull() } + if vr.Type() == bsontype.Undefined { + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + } if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 10f1aed950e25..60abffb248366 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -187,8 +187,9 @@ func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) return rb } -// RegisterEncoder has been deprecated and will be removed in a future major version release. Use RegisterTypeEncoder -// or RegisterHookEncoder instead. +// RegisterEncoder registers the provided type and encoder pair. +// +// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { if t == tEmpty { rb.typeEncoders[t] = enc @@ -210,8 +211,9 @@ func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *Re return rb } -// RegisterDecoder has been deprecated and will be removed in a future major version release. Use RegisterTypeDecoder -// or RegisterHookDecoder instead. +// RegisterDecoder registers the provided type and decoder pair. +// +// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. 
func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { if t == nil { rb.typeDecoders[nil] = dec @@ -325,7 +327,7 @@ func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { return enc, nil } - enc, found = r.lookupInterfaceEncoder(t) + enc, found = r.lookupInterfaceEncoder(t, true) if found { r.mu.Lock() r.typeEncoders[t] = enc @@ -359,14 +361,23 @@ func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { return enc, found } -func (r *Registry) lookupInterfaceEncoder(t reflect.Type) (ValueEncoder, bool) { +func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { if t == nil { return nil, false } for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) || reflect.PtrTo(t).Implements(ienc.i) { + if t.Implements(ienc.i) { return ienc.ve, true } + if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further ahead + // in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(t, false) + if !found { + defaultEnc, _ = r.kindEncoders[t.Kind()] + } + return newCondAddrEncoder(ienc.ve, defaultEnc), true + } } return nil, false } @@ -397,7 +408,7 @@ func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { return dec, nil } - dec, found = r.lookupInterfaceDecoder(t) + dec, found = r.lookupInterfaceDecoder(t, true) if found { r.mu.Lock() r.typeDecoders[t] = dec @@ -424,13 +435,20 @@ func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { return dec, found } -func (r *Registry) lookupInterfaceDecoder(t reflect.Type) (ValueDecoder, bool) { +func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { for _, idec := range r.interfaceDecoders { - if !t.Implements(idec.i) && !reflect.PtrTo(t).Implements(idec.i) { - continue + if t.Implements(idec.i) { + return idec.vd, true + } + if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further ahead + // in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(t, false) + if !found { + defaultDec, _ = r.kindDecoders[t.Kind()] + } + return newCondAddrDecoder(idec.vd, defaultDec), true } - - return idec.vd, true } return nil, false } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index f0282eb23b8d0..3c1b6b860ae4f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -123,6 +123,9 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r case bsontype.Null: val.Set(reflect.Zero(val.Type())) return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() case bsontype.Type(0), bsontype.EmbeddedDocument: if val.Type().Elem() != tE { return fmt.Errorf("cannot decode document into %s", val.Type()) @@ -149,8 +152,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r } return nil case bsontype.String: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", vrType) + if sliceType := val.Type().Elem(); sliceType != tByte { + return fmt.Errorf("SliceDecodeValue can only 
decode a string into a byte array, got %v", sliceType) } str, err := vr.ReadString() if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index c672cf5a68691..910f2049a4876 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -85,6 +85,10 @@ func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, va if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into a string type", vr.Type()) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 777cdfb69584f..30afc35ef992c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -19,9 +19,35 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -var defaultStructCodec = &StructCodec{ - cache: make(map[reflect.Type]*structDescription), - parser: DefaultStructTagParser, +// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type. +type DecodeError struct { + keys []string + wrapped error +} + +// Unwrap returns the underlying error +func (de *DecodeError) Unwrap() error { + return de.wrapped +} + +// Error implements the error interface. +func (de *DecodeError) Error() string { + // The keys are stored in reverse order because the de.keys slice is builtup while propagating the error up the + // stack of BSON keys, so we call de.Keys(), which reverses them. + keyPath := strings.Join(de.Keys(), ".") + return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped) +} + +// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down +// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be +// a string, the keys slice will be ["a", "b", "c"]. +func (de *DecodeError) Keys() []string { + reversedKeys := make([]string, 0, len(de.keys)) + for idx := len(de.keys) - 1; idx >= 0; idx-- { + reversedKeys = append(reversedKeys, de.keys[idx]) + } + + return reversedKeys } // Zeroer allows custom struct types to implement a report of zero @@ -166,6 +192,19 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return dw.WriteDocumentEnd() } +func newDecodeError(key string, original error) error { + de, ok := original.(*DecodeError) + if !ok { + return &DecodeError{ + keys: []string{key}, + wrapped: original, + } + } + + de.keys = append(de.keys, key) + return de +} + // DecodeValue implements the Codec interface. // By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. // For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
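The new bsoncodec.DecodeError wraps decoding failures with the chain of BSON keys that led to them, and the newDecodeError calls threaded through the decoders build that chain up as the error propagates. A sketch of inspecting it from calling code (field and helper names are illustrative):

package example

import (
	"errors"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

// describeDecodeFailure reports which BSON key could not be decoded.
func describeDecodeFailure() string {
	raw, _ := bson.Marshal(bson.M{"outer": bson.M{"count": "not a number"}})

	var out struct {
		Outer struct {
			Count int `bson:"count"`
		} `bson:"outer"`
	}
	err := bson.Unmarshal(raw, &out)

	var de *bsoncodec.DecodeError
	if errors.As(err, &de) {
		// Keys() is in top-down order, e.g. [outer count].
		return fmt.Sprintf("bad value at %v: %v", de.Keys(), de.Unwrap())
	}
	return fmt.Sprint(err)
}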
@@ -181,6 +220,13 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return err } + val.Set(reflect.Zero(val.Type())) + return nil + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + val.Set(reflect.Zero(val.Type())) return nil default: @@ -267,7 +313,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } if !field.CanSet() { // Being settable is a super set of being addressable. - return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field) + innerErr := fmt.Errorf("field %v is not settable", field) + return newDecodeError(fd.name, innerErr) } if field.Kind() == reflect.Ptr && field.IsNil() { field.Set(reflect.New(field.Type().Elem())) @@ -276,19 +323,19 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} if fd.decoder == nil { - return ErrNoDecoder{Type: field.Elem().Type()} + return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) } if decoder, ok := fd.decoder.(ValueDecoder); ok { err = decoder.DecodeValue(dctx, vr, field.Elem()) if err != nil { - return err + return newDecodeError(fd.name, err) } continue } err = fd.decoder.DecodeValue(dctx, vr, field) if err != nil { - return err + return newDecodeError(fd.name, err) } } @@ -350,7 +397,8 @@ type structDescription struct { } type fieldDescription struct { - name string + name string // BSON key name + fieldName string // struct field name idx int omitEmpty bool minSize bool @@ -394,7 +442,12 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr decoder = nil } - description := fieldDescription{idx: i, encoder: encoder, decoder: decoder} + description := fieldDescription{ + fieldName: sf.Name, + idx: i, + encoder: encoder, + decoder: decoder, + } stags, err := sc.parser.ParseStructTags(sf) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index 6f1b724d19b01..a7df44db70bf6 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -14,6 +14,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsonoptions" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" ) const ( @@ -80,6 +81,10 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re if err := vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into a time.Time", vrType) } @@ -97,5 +102,6 @@ func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } tt := val.Interface().(time.Time) - return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) + dt := primitive.NewDateTimeFromTime(tt) + return vw.WriteDateTime(int64(dt)) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index e0df05837486b..3c991264d1161 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ 
-112,6 +112,10 @@ func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if err = vr.ReadNull(); err != nil { return err } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return err + } default: return fmt.Errorf("cannot decode %v into an integer type", vrType) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go index 1ac3e20088a31..7a6a880b88a0b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -10,6 +10,12 @@ package bsonoptions type MapCodecOptions struct { DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. + // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must + // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a + // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the + // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override + // TextMarshaler/TextUnmarshaler. Defaults to false. + EncodeKeysWithStringer *bool } // MapCodec creates a new *MapCodecOptions @@ -23,12 +29,22 @@ func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { return t } -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { t.EncodeNilAsEmpty = &b return t } +// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the +// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key +// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with +// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer +// will override TextMarshaler/TextUnmarshaler. Defaults to false. +func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { + t.EncodeKeysWithStringer = &b + return t +} + // MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. 
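SetEncodeKeysWithStringer restores the previous fmt.Sprint-based key encoding for callers that relied on it, for example map keys whose String method is their canonical form. A sketch of wiring the option into a registry; bson.NewRegistryBuilder and bson.MarshalWithRegistry are existing driver entry points not shown in this patch, and the helper name is illustrative:

package example

import (
	"reflect"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

// marshalWithStringerKeys encodes map keys with fmt.Sprint, so a Stringer such
// as time.Month becomes its textual form ("January") rather than "1".
func marshalWithStringerKeys() (bson.Raw, error) {
	mapCodec := bsoncodec.NewMapCodec(bsonoptions.MapCodec().SetEncodeKeysWithStringer(true))
	reg := bson.NewRegistryBuilder().
		RegisterDefaultEncoder(reflect.Map, mapCodec).
		RegisterDefaultDecoder(reflect.Map, mapCodec).
		Build()

	raw, err := bson.MarshalWithRegistry(reg, map[time.Month]int{time.January: 1})
	return bson.Raw(raw), err
}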
func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { s := MapCodec() @@ -42,6 +58,9 @@ func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { if opt.EncodeNilAsEmpty != nil { s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty } + if opt.EncodeKeysWithStringer != nil { + s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer + } } return s diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 7e9612c07434e..7bcf662135afc 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -217,7 +217,7 @@ func parseDatetimeString(data string) (int64, error) { return 0, fmt.Errorf("invalid $date value string: %s", data) } - return t.Unix()*1e3 + int64(t.Nanosecond())/1e6, nil + return int64(primitive.NewDateTimeFromTime(t)), nil } func parseDatetimeObject(data *extJSONObject) (d int64, err error) { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 2943f14ecace1..67a69da3099ac 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -53,7 +53,7 @@ // 16. BSON min key unmarshals to an primitive.MinKey. // 17. BSON max key unmarshals to an primitive.MaxKey. // 18. BSON undefined unmarshals to a primitive.Undefined. -// 19. BSON null unmarshals to a primitive.Null. +// 19. BSON null unmarshals to nil. // 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 21. BSON symbol unmarshals to a primitive.Symbol. // @@ -67,13 +67,13 @@ // 5. uint8 and uint16 marshal to a BSON int32. // 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, // inclusive, and BSON int64 otherwise. -// 7. BSON null values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null value into a string -// will yield the empty string.). +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// undefined value into a string will yield the empty string.). // // Structs // -// Structs can be marshalled/unmarshalled to/from BSON. When transforming structs to/from BSON, the following rules -// apply: +// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended +// JSON, the following rules apply: // // 1. Only exported fields in structs will be marshalled or unmarshalled. // @@ -89,7 +89,10 @@ // 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents // unmarshalled into an interface{} field will be unmarshalled as a D. // -// The following struct tags can be used to configure behavior: +// The encoding of each struct field can be customized by the "bson" struct tag. +// The tag gives the name of the field, possibly followed by a comma-separated list of options. +// The name may be empty in order to specify options without overriding the default field name. The following options can be used +// to configure behavior: // // 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to // the zero value. 
By default, a struct field is only considered empty if the field's type implements the Zeroer diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 41d1cf28864c9..a0eb5378cbf0c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -93,11 +93,18 @@ func (id ObjectID) MarshalJSON() ([]byte, error) { return json.Marshal(id.Hex()) } -// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 64 bytes long, it +// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 24 bytes long, it // will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes -// long, it will be populated with the BSON representation of the ObjectID. Otherwise, it will -// return an error. +// long, it will be populated with the BSON representation of the ObjectID. This method also accepts empty strings and +// decodes them as NilObjectID. For any other inputs, an error will be returned. func (id *ObjectID) UnmarshalJSON(b []byte) error { + // Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer ObjectID field + // will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not + // enter the UnmarshalJSON hook. + if string(b) == "null" { + return nil + } + var err error switch len(b) { case 12: @@ -125,6 +132,12 @@ func (id *ObjectID) UnmarshalJSON(b []byte) error { } } + // An empty string is not a valid ObjectID, but we treat it as a special value that decodes as NilObjectID. + if len(str) == 0 { + copy(id[:], NilObjectID[:]) + return nil + } + if len(str) != 24 { return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str)) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index f47f8df3c84ec..5420496b5708f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -40,11 +40,32 @@ type Undefined struct{} // DateTime represents the BSON datetime value. type DateTime int64 +var _ json.Marshaler = DateTime(0) +var _ json.Unmarshaler = (*DateTime)(nil) + // MarshalJSON marshal to time type func (d DateTime) MarshalJSON() ([]byte, error) { return json.Marshal(d.Time()) } +// UnmarshalJSON creates a primitive.DateTime from a JSON string. +func (d *DateTime) UnmarshalJSON(data []byte) error { + // Ignore "null" to keep parity with the time.Time type and the standard library. Decoding "null" into a non-pointer + // DateTime field will leave the field unchanged. For pointer values, the encoding/json will set the pointer to nil + // and will not defer to the UnmarshalJSON hook. + if string(data) == "null" { + return nil + } + + var tempTime time.Time + if err := json.Unmarshal(data, &tempTime); err != nil { + return err + } + + *d = NewDateTimeFromTime(tempTime) + return nil +} + // Time returns the date as a time type. func (d DateTime) Time() time.Time { return time.Unix(int64(d)/1000, int64(d)%1000*1000000) @@ -52,7 +73,7 @@ func (d DateTime) Time() time.Time { // NewDateTimeFromTime creates a new DateTime from a Time. 
func NewDateTimeFromTime(t time.Time) DateTime { - return DateTime(t.UnixNano() / 1000000) + return DateTime(t.Unix()*1e3 + int64(t.Nanosecond())/1e6) } // Null represents the BSON null value. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index d59afcfe5458d..bd4c05039894f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -104,7 +104,9 @@ func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interfa } func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} } -func convertToCoreValue(v RawValue) bsoncore.Value { return bsoncore.Value{Type: v.Type, Data: v.Value} } +func convertToCoreValue(v RawValue) bsoncore.Value { + return bsoncore.Value{Type: v.Type, Data: v.Value} +} // Validate ensures the value is a valid BSON value. func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() } @@ -176,7 +178,9 @@ func (rv RawValue) ObjectID() primitive.ObjectID { return convertToCoreValue(rv) // ObjectIDOK is the same as ObjectID, except it returns a boolean instead of // panicking. -func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) { return convertToCoreValue(rv).ObjectIDOK() } +func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) { + return convertToCoreValue(rv).ObjectIDOK() +} // Boolean returns the boolean value the Value represents. It panics if the // value is a BSON type other than boolean. @@ -214,7 +218,9 @@ func (rv RawValue) RegexOK() (pattern, options string, ok bool) { // DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON // type other than DBPointer. -func (rv RawValue) DBPointer() (string, primitive.ObjectID) { return convertToCoreValue(rv).DBPointer() } +func (rv RawValue) DBPointer() (string, primitive.ObjectID) { + return convertToCoreValue(rv).DBPointer() +} // DBPointerOK is the same as DBPoitner, except that it returns a boolean // instead of panicking. diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 2a20d2e075c08..f7997fbbb7459 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -39,11 +39,14 @@ import ( // EmptyDocumentLength is the length of a document that has been started/ended but has no elements. const EmptyDocumentLength = 5 +// nullTerminator is a string version of the 0 byte that is appended at the end of cstrings. +const nullTerminator = string(byte(0)) + // AppendType will append t to dst and return the extended buffer. func AppendType(dst []byte, t bsontype.Type) []byte { return append(dst, byte(t)) } // AppendKey will append key to dst and return the extended buffer. -func AppendKey(dst []byte, key string) []byte { return append(dst, key+string(0x00)...) } +func AppendKey(dst []byte, key string) []byte { return append(dst, key+nullTerminator...) } // AppendHeader will append Type t and key to dst and return the extended // buffer. @@ -427,7 +430,7 @@ func AppendNullElement(dst []byte, key string) []byte { return AppendHeader(dst, // AppendRegex will append pattern and options to dst and return the extended buffer. func AppendRegex(dst []byte, pattern, options string) []byte { - return append(dst, pattern+string(0x00)+options+string(0x00)...) 
+ return append(dst, pattern+nullTerminator+options+nullTerminator...) } // AppendRegexElement will append a BSON regex element using key, pattern, and diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go index 91932fd1bebce..d397cde2995de 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go @@ -181,7 +181,8 @@ func (d Document) LookupErr(key ...string) (Value, error) { if !ok { return Value{}, NewInsufficientBytesError(d, rem) } - if elem.Key() != key[0] { + // We use `KeyBytes` rather than `Key` to avoid a needless string alloc. + if string(elem.KeyBytes()) != key[0] { continue } if len(key) > 1 { diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go index 163645b86f3f7..1415b077912fa 100644 --- a/vendor/golang.org/x/net/http/httpproxy/proxy.go +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -27,8 +27,7 @@ import ( type Config struct { // HTTPProxy represents the value of the HTTP_PROXY or // http_proxy environment variable. It will be used as the proxy - // URL for HTTP requests and HTTPS requests unless overridden by - // HTTPSProxy or NoProxy. + // URL for HTTP requests unless overridden by NoProxy. HTTPProxy string // HTTPSProxy represents the HTTPS_PROXY or https_proxy @@ -129,8 +128,7 @@ func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { var proxy *url.URL if reqURL.Scheme == "https" { proxy = cfg.httpsProxy - } - if proxy == nil { + } else if reqURL.Scheme == "http" { proxy = cfg.httpProxy if proxy != nil && cfg.CGI { return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4ec32669966ed..8b129b79419c2 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -154,12 +154,21 @@ func (t *Transport) pingTimeout() time.Duration { // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. +// +// Use ConfigureTransports instead to configure the HTTP/2 Transport. func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) + _, err := ConfigureTransports(t1) return err } -func configureTransport(t1 *http.Transport) (*Transport, error) { +// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2. +// It returns a new HTTP/2 Transport for further configuration. +// It returns an error if t1 has already been HTTP/2-enabled. 
+func ConfigureTransports(t1 *http.Transport) (*Transport, error) { + return configureTransports(t1) +} + +func configureTransports(t1 *http.Transport) (*Transport, error) { connPool := new(clientConnPool) t2 := &Transport{ ConnPool: noDialClientConnPool{connPool}, @@ -689,6 +698,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { + cc.Close() return nil, cc.werr } @@ -1080,6 +1090,15 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf bodyWriter := cc.t.getBodyWriterState(cs, body) cs.on100 = bodyWriter.on100 + defer func() { + cc.wmu.Lock() + werr := cc.werr + cc.wmu.Unlock() + if werr != nil { + cc.Close() + } + }() + cc.wmu.Lock() endStream := !hasBody && !hasTrailers werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) @@ -1129,6 +1148,9 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf // we can keep it. bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWrite) + if hasBody && !bodyWritten { + <-bodyWriter.resc + } } if re.err != nil { cc.forgetStreamID(cs.ID) @@ -1149,6 +1171,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc } cc.forgetStreamID(cs.ID) return nil, cs.getStartedWrite(), errTimeout @@ -1158,6 +1181,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc } cc.forgetStreamID(cs.ID) return nil, cs.getStartedWrite(), ctx.Err() @@ -1167,6 +1191,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc } cc.forgetStreamID(cs.ID) return nil, cs.getStartedWrite(), errRequestCanceled @@ -1176,6 +1201,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf // forgetStreamID. return nil, cs.getStartedWrite(), cs.resetErr case err := <-bodyWriter.resc: + bodyWritten = true // Prefer the read loop's response, if available. Issue 16102. select { case re := <-readLoopResCh: @@ -1186,7 +1212,6 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf cc.forgetStreamID(cs.ID) return nil, cs.getStartedWrite(), err } - bodyWritten = true if d := cc.responseHeaderTimeout(); d != 0 { timer := time.NewTimer(d) defer timer.Stop() diff --git a/vendor/golang.org/x/net/idna/tables12.00.go b/vendor/golang.org/x/net/idna/tables12.0.0.go similarity index 99% rename from vendor/golang.org/x/net/idna/tables12.00.go rename to vendor/golang.org/x/net/idna/tables12.0.0.go index f4b8ea3638ee5..f39f0cb4cd879 100644 --- a/vendor/golang.org/x/net/idna/tables12.00.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go new file mode 100644 index 0000000000000..e8c7a36d7a747 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -0,0 +1,4839 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+ +// +build go1.16 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +var mappings string = "" + // Size: 8188 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x06令和\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニ" + + "ング\x09インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー" + + "\x09ガロン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0f" + + "キロワット\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル" + + "\x0fサンチーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット" + + "\x09ハイツ\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0c" + + "フィート\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ" + + 
"\x0cポイント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク" + + "\x0fマンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09" + + "ユアン\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x04" + + "2点\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x02ʍ\x04𤋮\x04𢡊\x04𢡄\x04𣏕" + + "\x04𥉉\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ" + + "\x04יִ\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּ" + + "ׂ\x04אַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04" + + "ךּ\x04כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ" + + "\x04תּ\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ" + + "\x02ڤ\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ" + + "\x02ڳ\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ" + + "\x02ۅ\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02" + + "ی\x04ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04" + + "تح\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج" + + "\x04حم\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح" + + "\x04ضخ\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ" + + "\x04فم\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل" + + "\x04كم\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ" + + "\x04مم\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى" + + "\x04هي\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 " + + "ٍّ\x05 َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04ت" + + "ر\x04تز\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04" + + "ين\x04ئخ\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه" + + "\x04شم\x04شه\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي" + + "\x04سى\x04سي\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي" + + "\x04ضى\x04ضي\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06ت" + + "حج\x06تحم\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سج" + + "ح\x06سجى\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم" + + "\x06ضحى\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي" + + "\x06غمى\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح" + + "\x06محج\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم" + + "\x06نحم\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى" + + "\x06تخي\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي" + + "\x06ضحي\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي" + + 
"\x06كمي\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي" + + "\x06سخي\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08" + + "عليه\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:" + + "\x01!\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\" + + "\x01$\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ" + + "\x02إ\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز" + + "\x02س\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن" + + "\x02ه\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~" + + "\x02¢\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲" + + "\x08𝆹𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η" + + "\x02κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ" + + "\x02ڡ\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029," + + "\x03(a)\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)" + + "\x03(k)\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)" + + "\x03(u)\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03p" + + "pv\x02wc\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ" + + "\x03二\x03多\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終" + + "\x03生\x03販\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指" + + "\x03走\x03打\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔" + + "三〕\x09〔二〕\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03" + + "丸\x03乁\x03你\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03" + + "具\x03㒹\x03內\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03" + + "㔕\x03勇\x03勉\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03" + + "灰\x03及\x03叟\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03" + + "啣\x03善\x03喙\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03" + + "埴\x03堍\x03型\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03" + + "姘\x03婦\x03㛮\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03" + + "屮\x03峀\x03岍\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03" + + "㡢\x03㡼\x03庰\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03" + + "忍\x03志\x03忹\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03" + + "憤\x03憯\x03懞\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03" + + "掃\x03揤\x03搢\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03" + + "書\x03晉\x03㬙\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03" + + "朡\x03杞\x03杓\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03" + + "槪\x03檨\x03櫛\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03" + + "汧\x03洖\x03派\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03" + + "淹\x03潮\x03濆\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03" + + "爵\x03牐\x03犀\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03" + + "㼛\x03甤\x03甾\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03" + + "䂖\x03硎\x03碌\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03" + + "築\x03䈧\x03糒\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03" + + "罺\x03羕\x03翺\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03" + + "䑫\x03芑\x03芋\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03" + + "莽\x03菧\x03著\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03" + + "䕫\x03虐\x03虜\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03" + + "蠁\x03䗹\x03衠\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03" + + 
"豕\x03貫\x03賁\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03" + + "鈸\x03鋗\x03鋘\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03" + + "䩶\x03韠\x03䪲\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03" + + "鳽\x03䳎\x03䳭\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4862 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" + + "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" + + "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" + + "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" + + "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" + + "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" + + "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" + + "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" + + "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" + + "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" + + "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" + + "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" + + "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" + + "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" + + "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" + + "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" + + "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" + + "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" + + "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" + + "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" + + "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" + + "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" + + "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" + + "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" + + "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" + + "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" + + "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" + + "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" + + "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" 
+ + "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" + + "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" + + "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" + + "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" + + "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" + + "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" + + "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" + + "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" + + "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" + + "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" + + "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" + + "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" + + "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" + + "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" + + "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" + + "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" + + "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" + + "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" + + "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" + + "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." + + "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" + + "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" + + "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" + + "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" 
+ + "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" + + "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" + + "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" + + "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" + + "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" + + "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" + + "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" + + "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" + + "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" + + ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" + + "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" + + "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" + + "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" + + "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" + + "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" + + "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" + + "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" + + "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" + + "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" + + "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" + + "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" + + ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + 
"\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + 
"\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 30288 bytes (29.58 KiB). Checksum: c0cd84404a2f6f19. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 126: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 126 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 128 blocks, 8192 entries, 16384 bytes +// The third block is the zero block. +var idnaValues = [8192]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 
0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 
0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 
0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 
0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 
0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0c08, 0x557: 0x0c08, + 0x558: 0x0c08, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 
0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 
0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 
0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x3308, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 
0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 
0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 
0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 
0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 
0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 
0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, + 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e, + 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe, + 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e, + 
0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e, + 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde, + 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e, + 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 
0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5, + 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875, + 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935, + 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5, + 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15, + 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75, + 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95, + 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75, + 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5, + 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55, + 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15, + 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95, + 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5, + 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5, + 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5, + 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5, + 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275, + 0xf1e: 0x3295, 
0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0008, + 0xf3c: 0x0008, 0xf3d: 0x0008, 0xf3e: 0x0008, 0xf3f: 0x0008, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5, + 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, + 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475, + 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535, + 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5, + 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795, + 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855, + 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915, + 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5, + 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95, + 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55, + 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5, + 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95, + 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, + 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d, + 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05, + 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95, + 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd, + 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55, + 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5, + 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015, + 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x3cc9, + // Block 
0x40, offset 0x1000 + 0x1000: 0x3d01, 0x1001: 0x3d69, 0x1002: 0x3dd1, 0x1003: 0x3e39, 0x1004: 0x3e89, 0x1005: 0x3ef1, + 0x1006: 0x3f41, 0x1007: 0x3f91, 0x1008: 0x4011, 0x1009: 0x4079, 0x100a: 0x40c9, 0x100b: 0x4119, + 0x100c: 0x4169, 0x100d: 0x41d1, 0x100e: 0x4239, 0x100f: 0x4289, 0x1010: 0x42d9, 0x1011: 0x4311, + 0x1012: 0x4361, 0x1013: 0x43c9, 0x1014: 0x4431, 0x1015: 0x4469, 0x1016: 0x44e9, 0x1017: 0x4581, + 0x1018: 0x4601, 0x1019: 0x4651, 0x101a: 0x46d1, 0x101b: 0x4751, 0x101c: 0x47b9, 0x101d: 0x4809, + 0x101e: 0x4859, 0x101f: 0x48a9, 0x1020: 0x4911, 0x1021: 0x4991, 0x1022: 0x49f9, 0x1023: 0x4a49, + 0x1024: 0x4a99, 0x1025: 0x4ae9, 0x1026: 0x4b21, 0x1027: 0x4b59, 0x1028: 0x4b91, 0x1029: 0x4bc9, + 0x102a: 0x4c19, 0x102b: 0x4c69, 0x102c: 0x4ce9, 0x102d: 0x4d39, 0x102e: 0x4da1, 0x102f: 0x4e21, + 0x1030: 0x4e71, 0x1031: 0x4ea9, 0x1032: 0x4ee1, 0x1033: 0x4f61, 0x1034: 0x4fc9, 0x1035: 0x5049, + 0x1036: 0x5099, 0x1037: 0x5119, 0x1038: 0x5151, 0x1039: 0x51a1, 0x103a: 0x51f1, 0x103b: 0x5241, + 0x103c: 0x5291, 0x103d: 0x52e1, 0x103e: 0x5349, 0x103f: 0x5399, + // Block 0x41, offset 0x1040 + 0x1040: 0x53d1, 0x1041: 0x5421, 0x1042: 0x5471, 0x1043: 0x54c1, 0x1044: 0x5529, 0x1045: 0x5579, + 0x1046: 0x55c9, 0x1047: 0x5619, 0x1048: 0x5699, 0x1049: 0x5701, 0x104a: 0x5739, 0x104b: 0x57b9, + 0x104c: 0x57f1, 0x104d: 0x5859, 0x104e: 0x58c1, 0x104f: 0x5911, 0x1050: 0x5961, 0x1051: 0x59b1, + 0x1052: 0x5a19, 0x1053: 0x5a51, 0x1054: 0x5aa1, 0x1055: 0x5b09, 0x1056: 0x5b41, 0x1057: 0x5bc1, + 0x1058: 0x5c11, 0x1059: 0x5c39, 0x105a: 0x5c61, 0x105b: 0x5c89, 0x105c: 0x5cb1, 0x105d: 0x5cd9, + 0x105e: 0x5d01, 0x105f: 0x5d29, 0x1060: 0x5d51, 0x1061: 0x5d79, 0x1062: 0x5da1, 0x1063: 0x5dd1, + 0x1064: 0x5e01, 0x1065: 0x5e31, 0x1066: 0x5e61, 0x1067: 0x5e91, 0x1068: 0x5ec1, 0x1069: 0x5ef1, + 0x106a: 0x5f21, 0x106b: 0x5f51, 0x106c: 0x5f81, 0x106d: 0x5fb1, 0x106e: 0x5fe1, 0x106f: 0x6011, + 0x1070: 0x6041, 0x1071: 0x4045, 0x1072: 0x6071, 0x1073: 0x6089, 0x1074: 0x4065, 0x1075: 0x60a1, + 0x1076: 0x60b9, 0x1077: 0x60d1, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60e9, 0x107b: 0x6101, + 0x107c: 0x6139, 0x107d: 0x6171, 0x107e: 0x61a9, 0x107f: 0x61e1, + // Block 0x42, offset 0x1080 + 0x1080: 0x6249, 0x1081: 0x6261, 0x1082: 0x40a5, 0x1083: 0x6279, 0x1084: 0x6291, 0x1085: 0x62a9, + 0x1086: 0x62c1, 0x1087: 0x62d9, 0x1088: 0x40c5, 0x1089: 0x62f1, 0x108a: 0x6319, 0x108b: 0x6331, + 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6349, 0x108f: 0x6361, 0x1090: 0x6379, 0x1091: 0x4105, + 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6391, 0x1097: 0x63a9, + 0x1098: 0x63c1, 0x1099: 0x63d9, 0x109a: 0x63f1, 0x109b: 0x41a5, 0x109c: 0x6409, 0x109d: 0x6421, + 0x109e: 0x6439, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6451, 0x10a2: 0x4205, 0x10a3: 0x4225, + 0x10a4: 0x4245, 0x10a5: 0x6469, 0x10a6: 0x4265, 0x10a7: 0x6481, 0x10a8: 0x64b1, 0x10a9: 0x6249, + 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64e9, 0x10af: 0x6529, + 0x10b0: 0x6571, 0x10b1: 0x6589, 0x10b2: 0x4305, 0x10b3: 0x65a1, 0x10b4: 0x65b9, 0x10b5: 0x65d1, + 0x10b6: 0x4325, 0x10b7: 0x65e9, 0x10b8: 0x6601, 0x10b9: 0x65e9, 0x10ba: 0x6619, 0x10bb: 0x6631, + 0x10bc: 0x4345, 0x10bd: 0x6649, 0x10be: 0x6661, 0x10bf: 0x6649, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6679, 0x10c4: 0x6691, 0x10c5: 0x66a9, + 0x10c6: 0x66c1, 0x10c7: 0x0040, 0x10c8: 0x66f9, 0x10c9: 0x6711, 0x10ca: 0x6729, 0x10cb: 0x6741, + 0x10cc: 0x6759, 0x10cd: 0x6771, 0x10ce: 0x6439, 0x10cf: 0x6789, 0x10d0: 0x67a1, 0x10d1: 0x67b9, + 0x10d2: 
0x43a5, 0x10d3: 0x67d1, 0x10d4: 0x62c1, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67e9, + 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x6801, 0x10db: 0x6819, 0x10dc: 0x6831, 0x10dd: 0x6849, + 0x10de: 0x6861, 0x10df: 0x6891, 0x10e0: 0x68c1, 0x10e1: 0x68e9, 0x10e2: 0x6911, 0x10e3: 0x6939, + 0x10e4: 0x6961, 0x10e5: 0x6989, 0x10e6: 0x69b1, 0x10e7: 0x69d9, 0x10e8: 0x6a01, 0x10e9: 0x6a29, + 0x10ea: 0x6a59, 0x10eb: 0x6a89, 0x10ec: 0x6ab9, 0x10ed: 0x6ae9, 0x10ee: 0x6b19, 0x10ef: 0x6b49, + 0x10f0: 0x6b79, 0x10f1: 0x6ba9, 0x10f2: 0x6bd9, 0x10f3: 0x6c09, 0x10f4: 0x6c39, 0x10f5: 0x6c69, + 0x10f6: 0x6c99, 0x10f7: 0x6cc9, 0x10f8: 0x6cf9, 0x10f9: 0x6d29, 0x10fa: 0x6d59, 0x10fb: 0x6d89, + 0x10fc: 0x6db9, 0x10fd: 0x6de9, 0x10fe: 0x6e19, 0x10ff: 0x4425, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e49, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 
0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e61, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e79, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e91, 0x1231: 0x6ea9, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008, + 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008, + // Block 0x49, offset 0x1240 + 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, + 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, + 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, + 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, + 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, + 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, + 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, + 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 
0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, + 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, + 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, + 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6f19, 0x1290: 0x6f41, 0x1291: 0x6f69, + 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f91, 0x1296: 0x6fb9, 0x1297: 0x6fe1, + 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x7009, 0x12c1: 0x7021, 0x12c2: 0x7039, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7051, + 0x12c6: 0x7051, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7069, 0x12d4: 0x7091, 0x12d5: 0x70b9, 0x12d6: 0x70e1, 0x12d7: 0x7109, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x7131, + 0x12de: 0x3308, 0x12df: 0x7159, 0x12e0: 0x7181, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7199, + 0x12e4: 0x71b1, 0x12e5: 0x71c9, 0x12e6: 0x71e1, 0x12e7: 0x71f9, 0x12e8: 0x7211, 0x12e9: 0x1fb2, + 0x12ea: 0x7229, 0x12eb: 0x7251, 0x12ec: 0x7279, 0x12ed: 0x72b1, 0x12ee: 0x72e9, 0x12ef: 0x7311, + 0x12f0: 0x7339, 0x12f1: 0x7361, 0x12f2: 0x7389, 0x12f3: 0x73b1, 0x12f4: 0x73d9, 0x12f5: 0x7401, + 0x12f6: 0x7429, 0x12f7: 0x0040, 0x12f8: 0x7451, 0x12f9: 0x7479, 0x12fa: 0x74a1, 0x12fb: 0x74c9, + 0x12fc: 0x74f1, 0x12fd: 0x0040, 0x12fe: 0x7519, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x7541, 0x1301: 0x7569, 0x1302: 0x0040, 0x1303: 0x7591, 0x1304: 0x75b9, 0x1305: 0x0040, + 0x1306: 0x75e1, 0x1307: 0x7609, 0x1308: 0x7631, 0x1309: 0x7659, 0x130a: 0x7681, 0x130b: 0x76a9, + 0x130c: 0x76d1, 0x130d: 0x76f9, 0x130e: 0x7721, 0x130f: 0x7749, 0x1310: 0x7771, 0x1311: 0x7771, + 0x1312: 0x7789, 0x1313: 0x7789, 0x1314: 0x7789, 0x1315: 0x7789, 0x1316: 0x77a1, 0x1317: 0x77a1, + 0x1318: 0x77a1, 0x1319: 0x77a1, 0x131a: 0x77b9, 0x131b: 0x77b9, 0x131c: 0x77b9, 0x131d: 0x77b9, + 0x131e: 0x77d1, 0x131f: 0x77d1, 0x1320: 0x77d1, 0x1321: 0x77d1, 0x1322: 0x77e9, 0x1323: 0x77e9, + 0x1324: 0x77e9, 0x1325: 0x77e9, 0x1326: 0x7801, 0x1327: 0x7801, 0x1328: 0x7801, 0x1329: 0x7801, + 0x132a: 0x7819, 0x132b: 0x7819, 0x132c: 0x7819, 0x132d: 0x7819, 0x132e: 0x7831, 0x132f: 0x7831, + 0x1330: 0x7831, 0x1331: 0x7831, 0x1332: 0x7849, 0x1333: 0x7849, 0x1334: 0x7849, 0x1335: 0x7849, + 0x1336: 0x7861, 0x1337: 0x7861, 0x1338: 0x7861, 0x1339: 0x7861, 0x133a: 0x7879, 0x133b: 0x7879, + 0x133c: 0x7879, 0x133d: 0x7879, 0x133e: 0x7891, 0x133f: 0x7891, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7891, 0x1341: 0x7891, 0x1342: 0x78a9, 0x1343: 0x78a9, 0x1344: 0x78c1, 0x1345: 0x78c1, + 0x1346: 0x78d9, 0x1347: 0x78d9, 0x1348: 0x78f1, 0x1349: 0x78f1, 0x134a: 0x7909, 
0x134b: 0x7909, + 0x134c: 0x7921, 0x134d: 0x7921, 0x134e: 0x7939, 0x134f: 0x7939, 0x1350: 0x7939, 0x1351: 0x7939, + 0x1352: 0x7951, 0x1353: 0x7951, 0x1354: 0x7951, 0x1355: 0x7951, 0x1356: 0x7969, 0x1357: 0x7969, + 0x1358: 0x7969, 0x1359: 0x7969, 0x135a: 0x7981, 0x135b: 0x7981, 0x135c: 0x7981, 0x135d: 0x7981, + 0x135e: 0x7999, 0x135f: 0x7999, 0x1360: 0x79b1, 0x1361: 0x79b1, 0x1362: 0x79b1, 0x1363: 0x79b1, + 0x1364: 0x79c9, 0x1365: 0x79c9, 0x1366: 0x79e1, 0x1367: 0x79e1, 0x1368: 0x79e1, 0x1369: 0x79e1, + 0x136a: 0x79f9, 0x136b: 0x79f9, 0x136c: 0x79f9, 0x136d: 0x79f9, 0x136e: 0x7a11, 0x136f: 0x7a11, + 0x1370: 0x7a29, 0x1371: 0x7a29, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x7a41, 0x1394: 0x7a41, 0x1395: 0x7a41, 0x1396: 0x7a41, 0x1397: 0x7a59, + 0x1398: 0x7a59, 0x1399: 0x7a71, 0x139a: 0x7a71, 0x139b: 0x7a89, 0x139c: 0x7a89, 0x139d: 0x0479, + 0x139e: 0x7aa1, 0x139f: 0x7aa1, 0x13a0: 0x7ab9, 0x13a1: 0x7ab9, 0x13a2: 0x7ad1, 0x13a3: 0x7ad1, + 0x13a4: 0x7ae9, 0x13a5: 0x7ae9, 0x13a6: 0x7ae9, 0x13a7: 0x7ae9, 0x13a8: 0x7b01, 0x13a9: 0x7b01, + 0x13aa: 0x7b19, 0x13ab: 0x7b19, 0x13ac: 0x7b41, 0x13ad: 0x7b41, 0x13ae: 0x7b69, 0x13af: 0x7b69, + 0x13b0: 0x7b91, 0x13b1: 0x7b91, 0x13b2: 0x7bb9, 0x13b3: 0x7bb9, 0x13b4: 0x7be1, 0x13b5: 0x7be1, + 0x13b6: 0x7c09, 0x13b7: 0x7c09, 0x13b8: 0x7c09, 0x13b9: 0x7c31, 0x13ba: 0x7c31, 0x13bb: 0x7c31, + 0x13bc: 0x7c59, 0x13bd: 0x7c59, 0x13be: 0x7c59, 0x13bf: 0x7c59, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8649, 0x13c1: 0x8671, 0x13c2: 0x8699, 0x13c3: 0x86c1, 0x13c4: 0x86e9, 0x13c5: 0x8711, + 0x13c6: 0x8739, 0x13c7: 0x8761, 0x13c8: 0x8789, 0x13c9: 0x87b1, 0x13ca: 0x87d9, 0x13cb: 0x8801, + 0x13cc: 0x8829, 0x13cd: 0x8851, 0x13ce: 0x8879, 0x13cf: 0x88a1, 0x13d0: 0x88c9, 0x13d1: 0x88f1, + 0x13d2: 0x8919, 0x13d3: 0x8941, 0x13d4: 0x8969, 0x13d5: 0x8991, 0x13d6: 0x89b9, 0x13d7: 0x89e1, + 0x13d8: 0x8a09, 0x13d9: 0x8a31, 0x13da: 0x8a59, 0x13db: 0x8a81, 0x13dc: 0x8aa9, 0x13dd: 0x8ad1, + 0x13de: 0x8afa, 0x13df: 0x8b2a, 0x13e0: 0x8b5a, 0x13e1: 0x8b8a, 0x13e2: 0x8bba, 0x13e3: 0x8bea, + 0x13e4: 0x8c19, 0x13e5: 0x8c41, 0x13e6: 0x7cc1, 0x13e7: 0x8c69, 0x13e8: 0x7c31, 0x13e9: 0x7ce9, + 0x13ea: 0x8c91, 0x13eb: 0x8cb9, 0x13ec: 0x7d89, 0x13ed: 0x8ce1, 0x13ee: 0x7db1, 0x13ef: 0x7dd9, + 0x13f0: 0x8d09, 0x13f1: 0x8d31, 0x13f2: 0x7e79, 0x13f3: 0x8d59, 0x13f4: 0x7ea1, 0x13f5: 0x7ec9, + 0x13f6: 0x8d81, 0x13f7: 0x8da9, 0x13f8: 0x7f19, 0x13f9: 0x8dd1, 0x13fa: 0x7f41, 0x13fb: 0x7f69, + 0x13fc: 0x83f1, 0x13fd: 0x8419, 0x13fe: 0x8491, 0x13ff: 0x84b9, + // Block 0x50, offset 0x1400 + 0x1400: 0x84e1, 0x1401: 0x8581, 0x1402: 0x85a9, 0x1403: 0x85d1, 0x1404: 0x85f9, 0x1405: 0x8699, + 0x1406: 0x86c1, 0x1407: 0x86e9, 0x1408: 0x8df9, 0x1409: 0x8789, 0x140a: 0x8e21, 0x140b: 0x8e49, + 0x140c: 0x8879, 0x140d: 0x8e71, 0x140e: 0x88a1, 0x140f: 0x88c9, 0x1410: 0x8ad1, 0x1411: 0x8e99, + 0x1412: 0x8ec1, 0x1413: 0x8a09, 0x1414: 0x8ee9, 0x1415: 0x8a31, 0x1416: 0x8a59, 0x1417: 0x7c71, + 0x1418: 0x7c99, 0x1419: 0x8f11, 0x141a: 0x7cc1, 0x141b: 0x8f39, 0x141c: 0x7d11, 0x141d: 0x7d39, + 0x141e: 
0x7d61, 0x141f: 0x7d89, 0x1420: 0x8f61, 0x1421: 0x7e01, 0x1422: 0x7e29, 0x1423: 0x7e51, + 0x1424: 0x7e79, 0x1425: 0x8f89, 0x1426: 0x7f19, 0x1427: 0x7f91, 0x1428: 0x7fb9, 0x1429: 0x7fe1, + 0x142a: 0x8009, 0x142b: 0x8031, 0x142c: 0x8081, 0x142d: 0x80a9, 0x142e: 0x80d1, 0x142f: 0x80f9, + 0x1430: 0x8121, 0x1431: 0x8149, 0x1432: 0x8fb1, 0x1433: 0x8171, 0x1434: 0x8199, 0x1435: 0x81c1, + 0x1436: 0x81e9, 0x1437: 0x8211, 0x1438: 0x8239, 0x1439: 0x8289, 0x143a: 0x82b1, 0x143b: 0x82d9, + 0x143c: 0x8301, 0x143d: 0x8329, 0x143e: 0x8351, 0x143f: 0x8379, + // Block 0x51, offset 0x1440 + 0x1440: 0x83a1, 0x1441: 0x83c9, 0x1442: 0x8441, 0x1443: 0x8469, 0x1444: 0x8509, 0x1445: 0x8531, + 0x1446: 0x8559, 0x1447: 0x8581, 0x1448: 0x85a9, 0x1449: 0x8621, 0x144a: 0x8649, 0x144b: 0x8671, + 0x144c: 0x8699, 0x144d: 0x8fd9, 0x144e: 0x8711, 0x144f: 0x8739, 0x1450: 0x8761, 0x1451: 0x8789, + 0x1452: 0x8801, 0x1453: 0x8829, 0x1454: 0x8851, 0x1455: 0x8879, 0x1456: 0x9001, 0x1457: 0x88f1, + 0x1458: 0x8919, 0x1459: 0x9029, 0x145a: 0x8991, 0x145b: 0x89b9, 0x145c: 0x89e1, 0x145d: 0x8a09, + 0x145e: 0x9051, 0x145f: 0x7cc1, 0x1460: 0x8f39, 0x1461: 0x7d89, 0x1462: 0x8f61, 0x1463: 0x7e79, + 0x1464: 0x8f89, 0x1465: 0x7f19, 0x1466: 0x9079, 0x1467: 0x8121, 0x1468: 0x90a1, 0x1469: 0x90c9, + 0x146a: 0x90f1, 0x146b: 0x8581, 0x146c: 0x85a9, 0x146d: 0x8699, 0x146e: 0x8879, 0x146f: 0x9001, + 0x1470: 0x8a09, 0x1471: 0x9051, 0x1472: 0x9119, 0x1473: 0x9151, 0x1474: 0x9189, 0x1475: 0x91c1, + 0x1476: 0x91e9, 0x1477: 0x9211, 0x1478: 0x9239, 0x1479: 0x9261, 0x147a: 0x9289, 0x147b: 0x92b1, + 0x147c: 0x92d9, 0x147d: 0x9301, 0x147e: 0x9329, 0x147f: 0x9351, + // Block 0x52, offset 0x1480 + 0x1480: 0x9379, 0x1481: 0x93a1, 0x1482: 0x93c9, 0x1483: 0x93f1, 0x1484: 0x9419, 0x1485: 0x9441, + 0x1486: 0x9469, 0x1487: 0x9491, 0x1488: 0x94b9, 0x1489: 0x94e1, 0x148a: 0x9509, 0x148b: 0x9531, + 0x148c: 0x90c9, 0x148d: 0x9559, 0x148e: 0x9581, 0x148f: 0x95a9, 0x1490: 0x95d1, 0x1491: 0x91c1, + 0x1492: 0x91e9, 0x1493: 0x9211, 0x1494: 0x9239, 0x1495: 0x9261, 0x1496: 0x9289, 0x1497: 0x92b1, + 0x1498: 0x92d9, 0x1499: 0x9301, 0x149a: 0x9329, 0x149b: 0x9351, 0x149c: 0x9379, 0x149d: 0x93a1, + 0x149e: 0x93c9, 0x149f: 0x93f1, 0x14a0: 0x9419, 0x14a1: 0x9441, 0x14a2: 0x9469, 0x14a3: 0x9491, + 0x14a4: 0x94b9, 0x14a5: 0x94e1, 0x14a6: 0x9509, 0x14a7: 0x9531, 0x14a8: 0x90c9, 0x14a9: 0x9559, + 0x14aa: 0x9581, 0x14ab: 0x95a9, 0x14ac: 0x95d1, 0x14ad: 0x94e1, 0x14ae: 0x9509, 0x14af: 0x9531, + 0x14b0: 0x90c9, 0x14b1: 0x90a1, 0x14b2: 0x90f1, 0x14b3: 0x8261, 0x14b4: 0x80a9, 0x14b5: 0x80d1, + 0x14b6: 0x80f9, 0x14b7: 0x94e1, 0x14b8: 0x9509, 0x14b9: 0x9531, 0x14ba: 0x8261, 0x14bb: 0x8289, + 0x14bc: 0x95f9, 0x14bd: 0x95f9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x9621, 0x14d1: 0x9659, + 0x14d2: 0x9659, 0x14d3: 0x9691, 0x14d4: 0x96c9, 0x14d5: 0x9701, 0x14d6: 0x9739, 0x14d7: 0x9771, + 0x14d8: 0x97a9, 0x14d9: 0x97a9, 0x14da: 0x97e1, 0x14db: 0x9819, 0x14dc: 0x9851, 0x14dd: 0x9889, + 0x14de: 0x98c1, 0x14df: 0x98f9, 0x14e0: 0x98f9, 0x14e1: 0x9931, 0x14e2: 0x9969, 0x14e3: 0x9969, + 0x14e4: 0x99a1, 0x14e5: 0x99a1, 0x14e6: 0x99d9, 0x14e7: 0x9a11, 0x14e8: 0x9a11, 0x14e9: 0x9a49, + 0x14ea: 0x9a81, 0x14eb: 0x9a81, 0x14ec: 0x9ab9, 0x14ed: 0x9ab9, 0x14ee: 0x9af1, 0x14ef: 0x9b29, + 0x14f0: 0x9b29, 0x14f1: 0x9b61, 
0x14f2: 0x9b61, 0x14f3: 0x9b99, 0x14f4: 0x9bd1, 0x14f5: 0x9c09, + 0x14f6: 0x9c41, 0x14f7: 0x9c41, 0x14f8: 0x9c79, 0x14f9: 0x9cb1, 0x14fa: 0x9ce9, 0x14fb: 0x9d21, + 0x14fc: 0x9d59, 0x14fd: 0x9d59, 0x14fe: 0x9d91, 0x14ff: 0x9dc9, + // Block 0x54, offset 0x1500 + 0x1500: 0xa999, 0x1501: 0xa9d1, 0x1502: 0xaa09, 0x1503: 0xa8f1, 0x1504: 0x9c09, 0x1505: 0x99d9, + 0x1506: 0xaa41, 0x1507: 0xaa79, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaab1, 0x1531: 0xaae9, 0x1532: 0xab21, 0x1533: 0xab69, 0x1534: 0xabb1, 0x1535: 0xabf9, + 0x1536: 0xac41, 0x1537: 0xac89, 0x1538: 0xacd1, 0x1539: 0xad19, 0x153a: 0xad52, 0x153b: 0xae62, + 0x153c: 0xaee1, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaf2a, 0x1551: 0x7d8d, + 0x1552: 0x0040, 0x1553: 0xaf3a, 0x1554: 0x03c2, 0x1555: 0xaf4a, 0x1556: 0xaf5a, 0x1557: 0x7dad, + 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf6a, 0x1574: 0xaf6a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf7a, 0x1578: 0xaf8a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d, + 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d, + // Block 0x56, offset 0x1580 + 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf9a, 0x1588: 0xafaa, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e, + 0x158c: 0x7fae, 0x158d: 0xaf6a, 0x158e: 0xaf6a, 0x158f: 0xaf6a, 0x1590: 0xaf2a, 0x1591: 0x7fcd, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaf3a, 0x1596: 0xaf5a, 0x1597: 0xaf4a, + 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf7a, 0x159c: 0xaf8a, 0x159d: 0x7ecd, + 0x159e: 0x7f2d, 0x159f: 0xafba, 0x15a0: 0xafca, 0x15a1: 0xafda, 0x15a2: 0x1fb2, 0x15a3: 0xafe9, + 0x15a4: 0xaffa, 0x15a5: 0xb00a, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xb01a, 0x15a9: 0xb02a, + 0x15aa: 0xb03a, 0x15ab: 0xb04a, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x800e, 0x15b1: 0xb059, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040, + 0x15b6: 0x806e, 0x15b7: 0xb081, 0x15b8: 0x808e, 0x15b9: 0xb0a9, 0x15ba: 0x80ae, 0x15bb: 0xb0d1, + 0x15bc: 0x80ce, 0x15bd: 0xb0f9, 0x15be: 0x80ee, 0x15bf: 0xb121, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb149, 0x15c1: 0xb161, 0x15c2: 0xb161, 0x15c3: 
0xb179, 0x15c4: 0xb179, 0x15c5: 0xb191, + 0x15c6: 0xb191, 0x15c7: 0xb1a9, 0x15c8: 0xb1a9, 0x15c9: 0xb1c1, 0x15ca: 0xb1c1, 0x15cb: 0xb1c1, + 0x15cc: 0xb1c1, 0x15cd: 0xb1d9, 0x15ce: 0xb1d9, 0x15cf: 0xb1f1, 0x15d0: 0xb1f1, 0x15d1: 0xb1f1, + 0x15d2: 0xb1f1, 0x15d3: 0xb209, 0x15d4: 0xb209, 0x15d5: 0xb221, 0x15d6: 0xb221, 0x15d7: 0xb221, + 0x15d8: 0xb221, 0x15d9: 0xb239, 0x15da: 0xb239, 0x15db: 0xb239, 0x15dc: 0xb239, 0x15dd: 0xb251, + 0x15de: 0xb251, 0x15df: 0xb251, 0x15e0: 0xb251, 0x15e1: 0xb269, 0x15e2: 0xb269, 0x15e3: 0xb269, + 0x15e4: 0xb269, 0x15e5: 0xb281, 0x15e6: 0xb281, 0x15e7: 0xb281, 0x15e8: 0xb281, 0x15e9: 0xb299, + 0x15ea: 0xb299, 0x15eb: 0xb2b1, 0x15ec: 0xb2b1, 0x15ed: 0xb2c9, 0x15ee: 0xb2c9, 0x15ef: 0xb2e1, + 0x15f0: 0xb2e1, 0x15f1: 0xb2f9, 0x15f2: 0xb2f9, 0x15f3: 0xb2f9, 0x15f4: 0xb2f9, 0x15f5: 0xb311, + 0x15f6: 0xb311, 0x15f7: 0xb311, 0x15f8: 0xb311, 0x15f9: 0xb329, 0x15fa: 0xb329, 0x15fb: 0xb329, + 0x15fc: 0xb329, 0x15fd: 0xb341, 0x15fe: 0xb341, 0x15ff: 0xb341, + // Block 0x58, offset 0x1600 + 0x1600: 0xb341, 0x1601: 0xb359, 0x1602: 0xb359, 0x1603: 0xb359, 0x1604: 0xb359, 0x1605: 0xb371, + 0x1606: 0xb371, 0x1607: 0xb371, 0x1608: 0xb371, 0x1609: 0xb389, 0x160a: 0xb389, 0x160b: 0xb389, + 0x160c: 0xb389, 0x160d: 0xb3a1, 0x160e: 0xb3a1, 0x160f: 0xb3a1, 0x1610: 0xb3a1, 0x1611: 0xb3b9, + 0x1612: 0xb3b9, 0x1613: 0xb3b9, 0x1614: 0xb3b9, 0x1615: 0xb3d1, 0x1616: 0xb3d1, 0x1617: 0xb3d1, + 0x1618: 0xb3d1, 0x1619: 0xb3e9, 0x161a: 0xb3e9, 0x161b: 0xb3e9, 0x161c: 0xb3e9, 0x161d: 0xb401, + 0x161e: 0xb401, 0x161f: 0xb401, 0x1620: 0xb401, 0x1621: 0xb419, 0x1622: 0xb419, 0x1623: 0xb419, + 0x1624: 0xb419, 0x1625: 0xb431, 0x1626: 0xb431, 0x1627: 0xb431, 0x1628: 0xb431, 0x1629: 0xb449, + 0x162a: 0xb449, 0x162b: 0xb449, 0x162c: 0xb449, 0x162d: 0xb461, 0x162e: 0xb461, 0x162f: 0x7b01, + 0x1630: 0x7b01, 0x1631: 0xb479, 0x1632: 0xb479, 0x1633: 0xb479, 0x1634: 0xb479, 0x1635: 0xb491, + 0x1636: 0xb491, 0x1637: 0xb4b9, 0x1638: 0xb4b9, 0x1639: 0xb4e1, 0x163a: 0xb4e1, 0x163b: 0xb509, + 0x163c: 0xb509, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaf4a, 0x1642: 0xb532, 0x1643: 0xafba, 0x1644: 0xb02a, 0x1645: 0xb03a, + 0x1646: 0xafca, 0x1647: 0xb542, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xafda, 0x164b: 0x1fb2, + 0x164c: 0xaf2a, 0x164d: 0xafe9, 0x164e: 0x29d1, 0x164f: 0xb552, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaf3a, 0x165b: 0x03c2, 0x165c: 0xaffa, 0x165d: 0x1fc2, + 0x165e: 0xb00a, 0x165f: 0xaf5a, 0x1660: 0xb04a, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf9a, + 0x167c: 0xb01a, 0x167d: 0xafaa, 0x167e: 0xb562, 0x167f: 0xaf6a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 
0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf7a, 0x169c: 0xb572, 0x169d: 0xaf8a, + 0x169e: 0xb582, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d, + 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d, + 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd, + 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d, + 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d, + 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d, + 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd, + 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d, + 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d, + 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d, + 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d, + 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed, + 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d, + 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed, + 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d, + 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d, + 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d, + 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb591, 0x1721: 0xb5a9, 0x1722: 0xb5c1, 0x1723: 0x8a0e, + 0x1724: 0xb5d9, 0x1725: 0xb5f1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d, + 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 
0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 
0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0008, 0x1841: 0x0008, 0x1842: 0x0008, 0x1843: 0x0008, 0x1844: 0x0008, 0x1845: 0x0008, + 0x1846: 0x0008, 0x1847: 0x0040, 0x1848: 0x0040, 0x1849: 0x0008, 0x184a: 0x0040, 0x184b: 0x0040, + 0x184c: 0x0008, 0x184d: 0x0008, 0x184e: 0x0008, 0x184f: 0x0008, 0x1850: 0x0008, 0x1851: 0x0008, + 0x1852: 0x0008, 0x1853: 0x0008, 0x1854: 0x0040, 0x1855: 0x0008, 0x1856: 0x0008, 0x1857: 0x0040, + 0x1858: 0x0008, 0x1859: 0x0008, 0x185a: 0x0008, 0x185b: 0x0008, 0x185c: 0x0008, 0x185d: 0x0008, + 0x185e: 0x0008, 0x185f: 0x0008, 0x1860: 0x0008, 0x1861: 0x0008, 0x1862: 0x0008, 0x1863: 0x0008, + 0x1864: 0x0008, 0x1865: 0x0008, 0x1866: 0x0008, 0x1867: 0x0008, 0x1868: 0x0008, 0x1869: 0x0008, + 0x186a: 0x0008, 0x186b: 0x0008, 0x186c: 0x0008, 0x186d: 0x0008, 0x186e: 0x0008, 0x186f: 0x0008, + 0x1870: 0x3008, 0x1871: 0x3008, 0x1872: 0x3008, 0x1873: 0x3008, 0x1874: 0x3008, 0x1875: 0x3008, + 0x1876: 0x0040, 0x1877: 0x3008, 0x1878: 0x3008, 0x1879: 0x0040, 0x187a: 0x0040, 0x187b: 0x3308, + 0x187c: 0x3308, 0x187d: 0x3808, 0x187e: 0x3b08, 0x187f: 0x0008, + // Block 0x62, offset 0x1880 + 0x1880: 0x0039, 0x1881: 0x0ee9, 0x1882: 0x1159, 0x1883: 0x0ef9, 0x1884: 0x0f09, 0x1885: 0x1199, + 0x1886: 0x0f31, 0x1887: 0x0249, 0x1888: 0x0f41, 0x1889: 0x0259, 0x188a: 0x0f51, 0x188b: 0x0359, + 0x188c: 0x0f61, 0x188d: 0x0f71, 0x188e: 0x00d9, 0x188f: 0x0f99, 0x1890: 0x2039, 0x1891: 0x0269, + 0x1892: 0x01d9, 0x1893: 0x0fa9, 0x1894: 0x0fb9, 0x1895: 0x1089, 0x1896: 0x0279, 0x1897: 0x0369, + 0x1898: 0x0289, 0x1899: 0x13d1, 0x189a: 0x0039, 0x189b: 0x0ee9, 0x189c: 0x1159, 0x189d: 0x0ef9, + 0x189e: 0x0f09, 0x189f: 0x1199, 0x18a0: 0x0f31, 0x18a1: 0x0249, 0x18a2: 0x0f41, 0x18a3: 0x0259, + 0x18a4: 0x0f51, 0x18a5: 0x0359, 0x18a6: 0x0f61, 0x18a7: 0x0f71, 0x18a8: 0x00d9, 0x18a9: 0x0f99, + 0x18aa: 0x2039, 0x18ab: 0x0269, 0x18ac: 0x01d9, 0x18ad: 0x0fa9, 0x18ae: 0x0fb9, 0x18af: 0x1089, + 0x18b0: 0x0279, 0x18b1: 0x0369, 0x18b2: 0x0289, 0x18b3: 0x13d1, 0x18b4: 0x0039, 0x18b5: 0x0ee9, + 0x18b6: 0x1159, 0x18b7: 0x0ef9, 0x18b8: 0x0f09, 0x18b9: 0x1199, 0x18ba: 0x0f31, 0x18bb: 0x0249, + 0x18bc: 0x0f41, 0x18bd: 0x0259, 0x18be: 0x0f51, 0x18bf: 0x0359, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0f61, 0x18c1: 0x0f71, 0x18c2: 0x00d9, 0x18c3: 0x0f99, 0x18c4: 0x2039, 0x18c5: 0x0269, + 0x18c6: 0x01d9, 0x18c7: 0x0fa9, 0x18c8: 0x0fb9, 0x18c9: 0x1089, 0x18ca: 0x0279, 0x18cb: 0x0369, + 0x18cc: 0x0289, 0x18cd: 0x13d1, 0x18ce: 0x0039, 0x18cf: 0x0ee9, 0x18d0: 0x1159, 0x18d1: 0x0ef9, + 0x18d2: 0x0f09, 0x18d3: 0x1199, 0x18d4: 0x0f31, 0x18d5: 0x0040, 0x18d6: 0x0f41, 0x18d7: 0x0259, + 0x18d8: 0x0f51, 0x18d9: 0x0359, 0x18da: 0x0f61, 0x18db: 0x0f71, 0x18dc: 0x00d9, 0x18dd: 0x0f99, + 0x18de: 0x2039, 0x18df: 0x0269, 0x18e0: 0x01d9, 0x18e1: 0x0fa9, 0x18e2: 0x0fb9, 0x18e3: 0x1089, + 0x18e4: 0x0279, 0x18e5: 0x0369, 0x18e6: 0x0289, 0x18e7: 0x13d1, 0x18e8: 0x0039, 0x18e9: 0x0ee9, + 0x18ea: 0x1159, 0x18eb: 0x0ef9, 0x18ec: 0x0f09, 0x18ed: 0x1199, 0x18ee: 0x0f31, 0x18ef: 0x0249, + 0x18f0: 0x0f41, 0x18f1: 0x0259, 0x18f2: 0x0f51, 0x18f3: 0x0359, 0x18f4: 0x0f61, 0x18f5: 0x0f71, + 0x18f6: 0x00d9, 0x18f7: 0x0f99, 0x18f8: 0x2039, 0x18f9: 0x0269, 0x18fa: 0x01d9, 0x18fb: 0x0fa9, + 0x18fc: 0x0fb9, 0x18fd: 0x1089, 0x18fe: 0x0279, 0x18ff: 0x0369, + // Block 0x64, offset 0x1900 + 0x1900: 0x0289, 0x1901: 0x13d1, 0x1902: 0x0039, 0x1903: 0x0ee9, 0x1904: 0x1159, 0x1905: 0x0ef9, + 0x1906: 0x0f09, 0x1907: 0x1199, 0x1908: 0x0f31, 0x1909: 0x0249, 0x190a: 0x0f41, 0x190b: 0x0259, + 0x190c: 0x0f51, 0x190d: 0x0359, 0x190e: 0x0f61, 0x190f: 
0x0f71, 0x1910: 0x00d9, 0x1911: 0x0f99, + 0x1912: 0x2039, 0x1913: 0x0269, 0x1914: 0x01d9, 0x1915: 0x0fa9, 0x1916: 0x0fb9, 0x1917: 0x1089, + 0x1918: 0x0279, 0x1919: 0x0369, 0x191a: 0x0289, 0x191b: 0x13d1, 0x191c: 0x0039, 0x191d: 0x0040, + 0x191e: 0x1159, 0x191f: 0x0ef9, 0x1920: 0x0040, 0x1921: 0x0040, 0x1922: 0x0f31, 0x1923: 0x0040, + 0x1924: 0x0040, 0x1925: 0x0259, 0x1926: 0x0f51, 0x1927: 0x0040, 0x1928: 0x0040, 0x1929: 0x0f71, + 0x192a: 0x00d9, 0x192b: 0x0f99, 0x192c: 0x2039, 0x192d: 0x0040, 0x192e: 0x01d9, 0x192f: 0x0fa9, + 0x1930: 0x0fb9, 0x1931: 0x1089, 0x1932: 0x0279, 0x1933: 0x0369, 0x1934: 0x0289, 0x1935: 0x13d1, + 0x1936: 0x0039, 0x1937: 0x0ee9, 0x1938: 0x1159, 0x1939: 0x0ef9, 0x193a: 0x0040, 0x193b: 0x1199, + 0x193c: 0x0040, 0x193d: 0x0249, 0x193e: 0x0f41, 0x193f: 0x0259, + // Block 0x65, offset 0x1940 + 0x1940: 0x0f51, 0x1941: 0x0359, 0x1942: 0x0f61, 0x1943: 0x0f71, 0x1944: 0x0040, 0x1945: 0x0f99, + 0x1946: 0x2039, 0x1947: 0x0269, 0x1948: 0x01d9, 0x1949: 0x0fa9, 0x194a: 0x0fb9, 0x194b: 0x1089, + 0x194c: 0x0279, 0x194d: 0x0369, 0x194e: 0x0289, 0x194f: 0x13d1, 0x1950: 0x0039, 0x1951: 0x0ee9, + 0x1952: 0x1159, 0x1953: 0x0ef9, 0x1954: 0x0f09, 0x1955: 0x1199, 0x1956: 0x0f31, 0x1957: 0x0249, + 0x1958: 0x0f41, 0x1959: 0x0259, 0x195a: 0x0f51, 0x195b: 0x0359, 0x195c: 0x0f61, 0x195d: 0x0f71, + 0x195e: 0x00d9, 0x195f: 0x0f99, 0x1960: 0x2039, 0x1961: 0x0269, 0x1962: 0x01d9, 0x1963: 0x0fa9, + 0x1964: 0x0fb9, 0x1965: 0x1089, 0x1966: 0x0279, 0x1967: 0x0369, 0x1968: 0x0289, 0x1969: 0x13d1, + 0x196a: 0x0039, 0x196b: 0x0ee9, 0x196c: 0x1159, 0x196d: 0x0ef9, 0x196e: 0x0f09, 0x196f: 0x1199, + 0x1970: 0x0f31, 0x1971: 0x0249, 0x1972: 0x0f41, 0x1973: 0x0259, 0x1974: 0x0f51, 0x1975: 0x0359, + 0x1976: 0x0f61, 0x1977: 0x0f71, 0x1978: 0x00d9, 0x1979: 0x0f99, 0x197a: 0x2039, 0x197b: 0x0269, + 0x197c: 0x01d9, 0x197d: 0x0fa9, 0x197e: 0x0fb9, 0x197f: 0x1089, + // Block 0x66, offset 0x1980 + 0x1980: 0x0279, 0x1981: 0x0369, 0x1982: 0x0289, 0x1983: 0x13d1, 0x1984: 0x0039, 0x1985: 0x0ee9, + 0x1986: 0x0040, 0x1987: 0x0ef9, 0x1988: 0x0f09, 0x1989: 0x1199, 0x198a: 0x0f31, 0x198b: 0x0040, + 0x198c: 0x0040, 0x198d: 0x0259, 0x198e: 0x0f51, 0x198f: 0x0359, 0x1990: 0x0f61, 0x1991: 0x0f71, + 0x1992: 0x00d9, 0x1993: 0x0f99, 0x1994: 0x2039, 0x1995: 0x0040, 0x1996: 0x01d9, 0x1997: 0x0fa9, + 0x1998: 0x0fb9, 0x1999: 0x1089, 0x199a: 0x0279, 0x199b: 0x0369, 0x199c: 0x0289, 0x199d: 0x0040, + 0x199e: 0x0039, 0x199f: 0x0ee9, 0x19a0: 0x1159, 0x19a1: 0x0ef9, 0x19a2: 0x0f09, 0x19a3: 0x1199, + 0x19a4: 0x0f31, 0x19a5: 0x0249, 0x19a6: 0x0f41, 0x19a7: 0x0259, 0x19a8: 0x0f51, 0x19a9: 0x0359, + 0x19aa: 0x0f61, 0x19ab: 0x0f71, 0x19ac: 0x00d9, 0x19ad: 0x0f99, 0x19ae: 0x2039, 0x19af: 0x0269, + 0x19b0: 0x01d9, 0x19b1: 0x0fa9, 0x19b2: 0x0fb9, 0x19b3: 0x1089, 0x19b4: 0x0279, 0x19b5: 0x0369, + 0x19b6: 0x0289, 0x19b7: 0x13d1, 0x19b8: 0x0039, 0x19b9: 0x0ee9, 0x19ba: 0x0040, 0x19bb: 0x0ef9, + 0x19bc: 0x0f09, 0x19bd: 0x1199, 0x19be: 0x0f31, 0x19bf: 0x0040, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0f41, 0x19c1: 0x0259, 0x19c2: 0x0f51, 0x19c3: 0x0359, 0x19c4: 0x0f61, 0x19c5: 0x0040, + 0x19c6: 0x00d9, 0x19c7: 0x0040, 0x19c8: 0x0040, 0x19c9: 0x0040, 0x19ca: 0x01d9, 0x19cb: 0x0fa9, + 0x19cc: 0x0fb9, 0x19cd: 0x1089, 0x19ce: 0x0279, 0x19cf: 0x0369, 0x19d0: 0x0289, 0x19d1: 0x0040, + 0x19d2: 0x0039, 0x19d3: 0x0ee9, 0x19d4: 0x1159, 0x19d5: 0x0ef9, 0x19d6: 0x0f09, 0x19d7: 0x1199, + 0x19d8: 0x0f31, 0x19d9: 0x0249, 0x19da: 0x0f41, 0x19db: 0x0259, 0x19dc: 0x0f51, 0x19dd: 0x0359, + 0x19de: 0x0f61, 0x19df: 0x0f71, 0x19e0: 0x00d9, 0x19e1: 0x0f99, 0x19e2: 0x2039, 
0x19e3: 0x0269, + 0x19e4: 0x01d9, 0x19e5: 0x0fa9, 0x19e6: 0x0fb9, 0x19e7: 0x1089, 0x19e8: 0x0279, 0x19e9: 0x0369, + 0x19ea: 0x0289, 0x19eb: 0x13d1, 0x19ec: 0x0039, 0x19ed: 0x0ee9, 0x19ee: 0x1159, 0x19ef: 0x0ef9, + 0x19f0: 0x0f09, 0x19f1: 0x1199, 0x19f2: 0x0f31, 0x19f3: 0x0249, 0x19f4: 0x0f41, 0x19f5: 0x0259, + 0x19f6: 0x0f51, 0x19f7: 0x0359, 0x19f8: 0x0f61, 0x19f9: 0x0f71, 0x19fa: 0x00d9, 0x19fb: 0x0f99, + 0x19fc: 0x2039, 0x19fd: 0x0269, 0x19fe: 0x01d9, 0x19ff: 0x0fa9, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0fb9, 0x1a01: 0x1089, 0x1a02: 0x0279, 0x1a03: 0x0369, 0x1a04: 0x0289, 0x1a05: 0x13d1, + 0x1a06: 0x0039, 0x1a07: 0x0ee9, 0x1a08: 0x1159, 0x1a09: 0x0ef9, 0x1a0a: 0x0f09, 0x1a0b: 0x1199, + 0x1a0c: 0x0f31, 0x1a0d: 0x0249, 0x1a0e: 0x0f41, 0x1a0f: 0x0259, 0x1a10: 0x0f51, 0x1a11: 0x0359, + 0x1a12: 0x0f61, 0x1a13: 0x0f71, 0x1a14: 0x00d9, 0x1a15: 0x0f99, 0x1a16: 0x2039, 0x1a17: 0x0269, + 0x1a18: 0x01d9, 0x1a19: 0x0fa9, 0x1a1a: 0x0fb9, 0x1a1b: 0x1089, 0x1a1c: 0x0279, 0x1a1d: 0x0369, + 0x1a1e: 0x0289, 0x1a1f: 0x13d1, 0x1a20: 0x0039, 0x1a21: 0x0ee9, 0x1a22: 0x1159, 0x1a23: 0x0ef9, + 0x1a24: 0x0f09, 0x1a25: 0x1199, 0x1a26: 0x0f31, 0x1a27: 0x0249, 0x1a28: 0x0f41, 0x1a29: 0x0259, + 0x1a2a: 0x0f51, 0x1a2b: 0x0359, 0x1a2c: 0x0f61, 0x1a2d: 0x0f71, 0x1a2e: 0x00d9, 0x1a2f: 0x0f99, + 0x1a30: 0x2039, 0x1a31: 0x0269, 0x1a32: 0x01d9, 0x1a33: 0x0fa9, 0x1a34: 0x0fb9, 0x1a35: 0x1089, + 0x1a36: 0x0279, 0x1a37: 0x0369, 0x1a38: 0x0289, 0x1a39: 0x13d1, 0x1a3a: 0x0039, 0x1a3b: 0x0ee9, + 0x1a3c: 0x1159, 0x1a3d: 0x0ef9, 0x1a3e: 0x0f09, 0x1a3f: 0x1199, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x0f31, 0x1a41: 0x0249, 0x1a42: 0x0f41, 0x1a43: 0x0259, 0x1a44: 0x0f51, 0x1a45: 0x0359, + 0x1a46: 0x0f61, 0x1a47: 0x0f71, 0x1a48: 0x00d9, 0x1a49: 0x0f99, 0x1a4a: 0x2039, 0x1a4b: 0x0269, + 0x1a4c: 0x01d9, 0x1a4d: 0x0fa9, 0x1a4e: 0x0fb9, 0x1a4f: 0x1089, 0x1a50: 0x0279, 0x1a51: 0x0369, + 0x1a52: 0x0289, 0x1a53: 0x13d1, 0x1a54: 0x0039, 0x1a55: 0x0ee9, 0x1a56: 0x1159, 0x1a57: 0x0ef9, + 0x1a58: 0x0f09, 0x1a59: 0x1199, 0x1a5a: 0x0f31, 0x1a5b: 0x0249, 0x1a5c: 0x0f41, 0x1a5d: 0x0259, + 0x1a5e: 0x0f51, 0x1a5f: 0x0359, 0x1a60: 0x0f61, 0x1a61: 0x0f71, 0x1a62: 0x00d9, 0x1a63: 0x0f99, + 0x1a64: 0x2039, 0x1a65: 0x0269, 0x1a66: 0x01d9, 0x1a67: 0x0fa9, 0x1a68: 0x0fb9, 0x1a69: 0x1089, + 0x1a6a: 0x0279, 0x1a6b: 0x0369, 0x1a6c: 0x0289, 0x1a6d: 0x13d1, 0x1a6e: 0x0039, 0x1a6f: 0x0ee9, + 0x1a70: 0x1159, 0x1a71: 0x0ef9, 0x1a72: 0x0f09, 0x1a73: 0x1199, 0x1a74: 0x0f31, 0x1a75: 0x0249, + 0x1a76: 0x0f41, 0x1a77: 0x0259, 0x1a78: 0x0f51, 0x1a79: 0x0359, 0x1a7a: 0x0f61, 0x1a7b: 0x0f71, + 0x1a7c: 0x00d9, 0x1a7d: 0x0f99, 0x1a7e: 0x2039, 0x1a7f: 0x0269, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x01d9, 0x1a81: 0x0fa9, 0x1a82: 0x0fb9, 0x1a83: 0x1089, 0x1a84: 0x0279, 0x1a85: 0x0369, + 0x1a86: 0x0289, 0x1a87: 0x13d1, 0x1a88: 0x0039, 0x1a89: 0x0ee9, 0x1a8a: 0x1159, 0x1a8b: 0x0ef9, + 0x1a8c: 0x0f09, 0x1a8d: 0x1199, 0x1a8e: 0x0f31, 0x1a8f: 0x0249, 0x1a90: 0x0f41, 0x1a91: 0x0259, + 0x1a92: 0x0f51, 0x1a93: 0x0359, 0x1a94: 0x0f61, 0x1a95: 0x0f71, 0x1a96: 0x00d9, 0x1a97: 0x0f99, + 0x1a98: 0x2039, 0x1a99: 0x0269, 0x1a9a: 0x01d9, 0x1a9b: 0x0fa9, 0x1a9c: 0x0fb9, 0x1a9d: 0x1089, + 0x1a9e: 0x0279, 0x1a9f: 0x0369, 0x1aa0: 0x0289, 0x1aa1: 0x13d1, 0x1aa2: 0x0039, 0x1aa3: 0x0ee9, + 0x1aa4: 0x1159, 0x1aa5: 0x0ef9, 0x1aa6: 0x0f09, 0x1aa7: 0x1199, 0x1aa8: 0x0f31, 0x1aa9: 0x0249, + 0x1aaa: 0x0f41, 0x1aab: 0x0259, 0x1aac: 0x0f51, 0x1aad: 0x0359, 0x1aae: 0x0f61, 0x1aaf: 0x0f71, + 0x1ab0: 0x00d9, 0x1ab1: 0x0f99, 0x1ab2: 0x2039, 0x1ab3: 0x0269, 0x1ab4: 0x01d9, 0x1ab5: 0x0fa9, + 0x1ab6: 
0x0fb9, 0x1ab7: 0x1089, 0x1ab8: 0x0279, 0x1ab9: 0x0369, 0x1aba: 0x0289, 0x1abb: 0x13d1, + 0x1abc: 0x0039, 0x1abd: 0x0ee9, 0x1abe: 0x1159, 0x1abf: 0x0ef9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0f09, 0x1ac1: 0x1199, 0x1ac2: 0x0f31, 0x1ac3: 0x0249, 0x1ac4: 0x0f41, 0x1ac5: 0x0259, + 0x1ac6: 0x0f51, 0x1ac7: 0x0359, 0x1ac8: 0x0f61, 0x1ac9: 0x0f71, 0x1aca: 0x00d9, 0x1acb: 0x0f99, + 0x1acc: 0x2039, 0x1acd: 0x0269, 0x1ace: 0x01d9, 0x1acf: 0x0fa9, 0x1ad0: 0x0fb9, 0x1ad1: 0x1089, + 0x1ad2: 0x0279, 0x1ad3: 0x0369, 0x1ad4: 0x0289, 0x1ad5: 0x13d1, 0x1ad6: 0x0039, 0x1ad7: 0x0ee9, + 0x1ad8: 0x1159, 0x1ad9: 0x0ef9, 0x1ada: 0x0f09, 0x1adb: 0x1199, 0x1adc: 0x0f31, 0x1add: 0x0249, + 0x1ade: 0x0f41, 0x1adf: 0x0259, 0x1ae0: 0x0f51, 0x1ae1: 0x0359, 0x1ae2: 0x0f61, 0x1ae3: 0x0f71, + 0x1ae4: 0x00d9, 0x1ae5: 0x0f99, 0x1ae6: 0x2039, 0x1ae7: 0x0269, 0x1ae8: 0x01d9, 0x1ae9: 0x0fa9, + 0x1aea: 0x0fb9, 0x1aeb: 0x1089, 0x1aec: 0x0279, 0x1aed: 0x0369, 0x1aee: 0x0289, 0x1aef: 0x13d1, + 0x1af0: 0x0039, 0x1af1: 0x0ee9, 0x1af2: 0x1159, 0x1af3: 0x0ef9, 0x1af4: 0x0f09, 0x1af5: 0x1199, + 0x1af6: 0x0f31, 0x1af7: 0x0249, 0x1af8: 0x0f41, 0x1af9: 0x0259, 0x1afa: 0x0f51, 0x1afb: 0x0359, + 0x1afc: 0x0f61, 0x1afd: 0x0f71, 0x1afe: 0x00d9, 0x1aff: 0x0f99, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2039, 0x1b01: 0x0269, 0x1b02: 0x01d9, 0x1b03: 0x0fa9, 0x1b04: 0x0fb9, 0x1b05: 0x1089, + 0x1b06: 0x0279, 0x1b07: 0x0369, 0x1b08: 0x0289, 0x1b09: 0x13d1, 0x1b0a: 0x0039, 0x1b0b: 0x0ee9, + 0x1b0c: 0x1159, 0x1b0d: 0x0ef9, 0x1b0e: 0x0f09, 0x1b0f: 0x1199, 0x1b10: 0x0f31, 0x1b11: 0x0249, + 0x1b12: 0x0f41, 0x1b13: 0x0259, 0x1b14: 0x0f51, 0x1b15: 0x0359, 0x1b16: 0x0f61, 0x1b17: 0x0f71, + 0x1b18: 0x00d9, 0x1b19: 0x0f99, 0x1b1a: 0x2039, 0x1b1b: 0x0269, 0x1b1c: 0x01d9, 0x1b1d: 0x0fa9, + 0x1b1e: 0x0fb9, 0x1b1f: 0x1089, 0x1b20: 0x0279, 0x1b21: 0x0369, 0x1b22: 0x0289, 0x1b23: 0x13d1, + 0x1b24: 0xbad1, 0x1b25: 0xbae9, 0x1b26: 0x0040, 0x1b27: 0x0040, 0x1b28: 0xbb01, 0x1b29: 0x1099, + 0x1b2a: 0x10b1, 0x1b2b: 0x10c9, 0x1b2c: 0xbb19, 0x1b2d: 0xbb31, 0x1b2e: 0xbb49, 0x1b2f: 0x1429, + 0x1b30: 0x1a31, 0x1b31: 0xbb61, 0x1b32: 0xbb79, 0x1b33: 0xbb91, 0x1b34: 0xbba9, 0x1b35: 0xbbc1, + 0x1b36: 0xbbd9, 0x1b37: 0x2109, 0x1b38: 0x1111, 0x1b39: 0x1429, 0x1b3a: 0xbbf1, 0x1b3b: 0xbc09, + 0x1b3c: 0xbc21, 0x1b3d: 0x10e1, 0x1b3e: 0x10f9, 0x1b3f: 0xbc39, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x2079, 0x1b41: 0xbc51, 0x1b42: 0xbb01, 0x1b43: 0x1099, 0x1b44: 0x10b1, 0x1b45: 0x10c9, + 0x1b46: 0xbb19, 0x1b47: 0xbb31, 0x1b48: 0xbb49, 0x1b49: 0x1429, 0x1b4a: 0x1a31, 0x1b4b: 0xbb61, + 0x1b4c: 0xbb79, 0x1b4d: 0xbb91, 0x1b4e: 0xbba9, 0x1b4f: 0xbbc1, 0x1b50: 0xbbd9, 0x1b51: 0x2109, + 0x1b52: 0x1111, 0x1b53: 0xbbf1, 0x1b54: 0xbbf1, 0x1b55: 0xbc09, 0x1b56: 0xbc21, 0x1b57: 0x10e1, + 0x1b58: 0x10f9, 0x1b59: 0xbc39, 0x1b5a: 0x2079, 0x1b5b: 0xbc71, 0x1b5c: 0xbb19, 0x1b5d: 0x1429, + 0x1b5e: 0xbb61, 0x1b5f: 0x10e1, 0x1b60: 0x1111, 0x1b61: 0x2109, 0x1b62: 0xbb01, 0x1b63: 0x1099, + 0x1b64: 0x10b1, 0x1b65: 0x10c9, 0x1b66: 0xbb19, 0x1b67: 0xbb31, 0x1b68: 0xbb49, 0x1b69: 0x1429, + 0x1b6a: 0x1a31, 0x1b6b: 0xbb61, 0x1b6c: 0xbb79, 0x1b6d: 0xbb91, 0x1b6e: 0xbba9, 0x1b6f: 0xbbc1, + 0x1b70: 0xbbd9, 0x1b71: 0x2109, 0x1b72: 0x1111, 0x1b73: 0x1429, 0x1b74: 0xbbf1, 0x1b75: 0xbc09, + 0x1b76: 0xbc21, 0x1b77: 0x10e1, 0x1b78: 0x10f9, 0x1b79: 0xbc39, 0x1b7a: 0x2079, 0x1b7b: 0xbc51, + 0x1b7c: 0xbb01, 0x1b7d: 0x1099, 0x1b7e: 0x10b1, 0x1b7f: 0x10c9, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb19, 0x1b81: 0xbb31, 0x1b82: 0xbb49, 0x1b83: 0x1429, 0x1b84: 0x1a31, 0x1b85: 0xbb61, + 0x1b86: 0xbb79, 0x1b87: 0xbb91, 
0x1b88: 0xbba9, 0x1b89: 0xbbc1, 0x1b8a: 0xbbd9, 0x1b8b: 0x2109, + 0x1b8c: 0x1111, 0x1b8d: 0xbbf1, 0x1b8e: 0xbbf1, 0x1b8f: 0xbc09, 0x1b90: 0xbc21, 0x1b91: 0x10e1, + 0x1b92: 0x10f9, 0x1b93: 0xbc39, 0x1b94: 0x2079, 0x1b95: 0xbc71, 0x1b96: 0xbb19, 0x1b97: 0x1429, + 0x1b98: 0xbb61, 0x1b99: 0x10e1, 0x1b9a: 0x1111, 0x1b9b: 0x2109, 0x1b9c: 0xbb01, 0x1b9d: 0x1099, + 0x1b9e: 0x10b1, 0x1b9f: 0x10c9, 0x1ba0: 0xbb19, 0x1ba1: 0xbb31, 0x1ba2: 0xbb49, 0x1ba3: 0x1429, + 0x1ba4: 0x1a31, 0x1ba5: 0xbb61, 0x1ba6: 0xbb79, 0x1ba7: 0xbb91, 0x1ba8: 0xbba9, 0x1ba9: 0xbbc1, + 0x1baa: 0xbbd9, 0x1bab: 0x2109, 0x1bac: 0x1111, 0x1bad: 0x1429, 0x1bae: 0xbbf1, 0x1baf: 0xbc09, + 0x1bb0: 0xbc21, 0x1bb1: 0x10e1, 0x1bb2: 0x10f9, 0x1bb3: 0xbc39, 0x1bb4: 0x2079, 0x1bb5: 0xbc51, + 0x1bb6: 0xbb01, 0x1bb7: 0x1099, 0x1bb8: 0x10b1, 0x1bb9: 0x10c9, 0x1bba: 0xbb19, 0x1bbb: 0xbb31, + 0x1bbc: 0xbb49, 0x1bbd: 0x1429, 0x1bbe: 0x1a31, 0x1bbf: 0xbb61, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0xbb79, 0x1bc1: 0xbb91, 0x1bc2: 0xbba9, 0x1bc3: 0xbbc1, 0x1bc4: 0xbbd9, 0x1bc5: 0x2109, + 0x1bc6: 0x1111, 0x1bc7: 0xbbf1, 0x1bc8: 0xbbf1, 0x1bc9: 0xbc09, 0x1bca: 0xbc21, 0x1bcb: 0x10e1, + 0x1bcc: 0x10f9, 0x1bcd: 0xbc39, 0x1bce: 0x2079, 0x1bcf: 0xbc71, 0x1bd0: 0xbb19, 0x1bd1: 0x1429, + 0x1bd2: 0xbb61, 0x1bd3: 0x10e1, 0x1bd4: 0x1111, 0x1bd5: 0x2109, 0x1bd6: 0xbb01, 0x1bd7: 0x1099, + 0x1bd8: 0x10b1, 0x1bd9: 0x10c9, 0x1bda: 0xbb19, 0x1bdb: 0xbb31, 0x1bdc: 0xbb49, 0x1bdd: 0x1429, + 0x1bde: 0x1a31, 0x1bdf: 0xbb61, 0x1be0: 0xbb79, 0x1be1: 0xbb91, 0x1be2: 0xbba9, 0x1be3: 0xbbc1, + 0x1be4: 0xbbd9, 0x1be5: 0x2109, 0x1be6: 0x1111, 0x1be7: 0x1429, 0x1be8: 0xbbf1, 0x1be9: 0xbc09, + 0x1bea: 0xbc21, 0x1beb: 0x10e1, 0x1bec: 0x10f9, 0x1bed: 0xbc39, 0x1bee: 0x2079, 0x1bef: 0xbc51, + 0x1bf0: 0xbb01, 0x1bf1: 0x1099, 0x1bf2: 0x10b1, 0x1bf3: 0x10c9, 0x1bf4: 0xbb19, 0x1bf5: 0xbb31, + 0x1bf6: 0xbb49, 0x1bf7: 0x1429, 0x1bf8: 0x1a31, 0x1bf9: 0xbb61, 0x1bfa: 0xbb79, 0x1bfb: 0xbb91, + 0x1bfc: 0xbba9, 0x1bfd: 0xbbc1, 0x1bfe: 0xbbd9, 0x1bff: 0x2109, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x1111, 0x1c01: 0xbbf1, 0x1c02: 0xbbf1, 0x1c03: 0xbc09, 0x1c04: 0xbc21, 0x1c05: 0x10e1, + 0x1c06: 0x10f9, 0x1c07: 0xbc39, 0x1c08: 0x2079, 0x1c09: 0xbc71, 0x1c0a: 0xbb19, 0x1c0b: 0x1429, + 0x1c0c: 0xbb61, 0x1c0d: 0x10e1, 0x1c0e: 0x1111, 0x1c0f: 0x2109, 0x1c10: 0xbb01, 0x1c11: 0x1099, + 0x1c12: 0x10b1, 0x1c13: 0x10c9, 0x1c14: 0xbb19, 0x1c15: 0xbb31, 0x1c16: 0xbb49, 0x1c17: 0x1429, + 0x1c18: 0x1a31, 0x1c19: 0xbb61, 0x1c1a: 0xbb79, 0x1c1b: 0xbb91, 0x1c1c: 0xbba9, 0x1c1d: 0xbbc1, + 0x1c1e: 0xbbd9, 0x1c1f: 0x2109, 0x1c20: 0x1111, 0x1c21: 0x1429, 0x1c22: 0xbbf1, 0x1c23: 0xbc09, + 0x1c24: 0xbc21, 0x1c25: 0x10e1, 0x1c26: 0x10f9, 0x1c27: 0xbc39, 0x1c28: 0x2079, 0x1c29: 0xbc51, + 0x1c2a: 0xbb01, 0x1c2b: 0x1099, 0x1c2c: 0x10b1, 0x1c2d: 0x10c9, 0x1c2e: 0xbb19, 0x1c2f: 0xbb31, + 0x1c30: 0xbb49, 0x1c31: 0x1429, 0x1c32: 0x1a31, 0x1c33: 0xbb61, 0x1c34: 0xbb79, 0x1c35: 0xbb91, + 0x1c36: 0xbba9, 0x1c37: 0xbbc1, 0x1c38: 0xbbd9, 0x1c39: 0x2109, 0x1c3a: 0x1111, 0x1c3b: 0xbbf1, + 0x1c3c: 0xbbf1, 0x1c3d: 0xbc09, 0x1c3e: 0xbc21, 0x1c3f: 0x10e1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x10f9, 0x1c41: 0xbc39, 0x1c42: 0x2079, 0x1c43: 0xbc71, 0x1c44: 0xbb19, 0x1c45: 0x1429, + 0x1c46: 0xbb61, 0x1c47: 0x10e1, 0x1c48: 0x1111, 0x1c49: 0x2109, 0x1c4a: 0xbc91, 0x1c4b: 0xbc91, + 0x1c4c: 0x0040, 0x1c4d: 0x0040, 0x1c4e: 0x1f41, 0x1c4f: 0x00c9, 0x1c50: 0x0069, 0x1c51: 0x0079, + 0x1c52: 0x1f51, 0x1c53: 0x1f61, 0x1c54: 0x1f71, 0x1c55: 0x1f81, 0x1c56: 0x1f91, 0x1c57: 0x1fa1, + 0x1c58: 0x1f41, 0x1c59: 0x00c9, 0x1c5a: 0x0069, 0x1c5b: 
0x0079, 0x1c5c: 0x1f51, 0x1c5d: 0x1f61, + 0x1c5e: 0x1f71, 0x1c5f: 0x1f81, 0x1c60: 0x1f91, 0x1c61: 0x1fa1, 0x1c62: 0x1f41, 0x1c63: 0x00c9, + 0x1c64: 0x0069, 0x1c65: 0x0079, 0x1c66: 0x1f51, 0x1c67: 0x1f61, 0x1c68: 0x1f71, 0x1c69: 0x1f81, + 0x1c6a: 0x1f91, 0x1c6b: 0x1fa1, 0x1c6c: 0x1f41, 0x1c6d: 0x00c9, 0x1c6e: 0x0069, 0x1c6f: 0x0079, + 0x1c70: 0x1f51, 0x1c71: 0x1f61, 0x1c72: 0x1f71, 0x1c73: 0x1f81, 0x1c74: 0x1f91, 0x1c75: 0x1fa1, + 0x1c76: 0x1f41, 0x1c77: 0x00c9, 0x1c78: 0x0069, 0x1c79: 0x0079, 0x1c7a: 0x1f51, 0x1c7b: 0x1f61, + 0x1c7c: 0x1f71, 0x1c7d: 0x1f81, 0x1c7e: 0x1f91, 0x1c7f: 0x1fa1, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xe115, 0x1c81: 0xe115, 0x1c82: 0xe135, 0x1c83: 0xe135, 0x1c84: 0xe115, 0x1c85: 0xe115, + 0x1c86: 0xe175, 0x1c87: 0xe175, 0x1c88: 0xe115, 0x1c89: 0xe115, 0x1c8a: 0xe135, 0x1c8b: 0xe135, + 0x1c8c: 0xe115, 0x1c8d: 0xe115, 0x1c8e: 0xe1f5, 0x1c8f: 0xe1f5, 0x1c90: 0xe115, 0x1c91: 0xe115, + 0x1c92: 0xe135, 0x1c93: 0xe135, 0x1c94: 0xe115, 0x1c95: 0xe115, 0x1c96: 0xe175, 0x1c97: 0xe175, + 0x1c98: 0xe115, 0x1c99: 0xe115, 0x1c9a: 0xe135, 0x1c9b: 0xe135, 0x1c9c: 0xe115, 0x1c9d: 0xe115, + 0x1c9e: 0x8b3d, 0x1c9f: 0x8b3d, 0x1ca0: 0x04b5, 0x1ca1: 0x04b5, 0x1ca2: 0x0a08, 0x1ca3: 0x0a08, + 0x1ca4: 0x0a08, 0x1ca5: 0x0a08, 0x1ca6: 0x0a08, 0x1ca7: 0x0a08, 0x1ca8: 0x0a08, 0x1ca9: 0x0a08, + 0x1caa: 0x0a08, 0x1cab: 0x0a08, 0x1cac: 0x0a08, 0x1cad: 0x0a08, 0x1cae: 0x0a08, 0x1caf: 0x0a08, + 0x1cb0: 0x0a08, 0x1cb1: 0x0a08, 0x1cb2: 0x0a08, 0x1cb3: 0x0a08, 0x1cb4: 0x0a08, 0x1cb5: 0x0a08, + 0x1cb6: 0x0a08, 0x1cb7: 0x0a08, 0x1cb8: 0x0a08, 0x1cb9: 0x0a08, 0x1cba: 0x0a08, 0x1cbb: 0x0a08, + 0x1cbc: 0x0a08, 0x1cbd: 0x0a08, 0x1cbe: 0x0a08, 0x1cbf: 0x0a08, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0xb1d9, 0x1cc1: 0xb1f1, 0x1cc2: 0xb251, 0x1cc3: 0xb299, 0x1cc4: 0x0040, 0x1cc5: 0xb461, + 0x1cc6: 0xb2e1, 0x1cc7: 0xb269, 0x1cc8: 0xb359, 0x1cc9: 0xb479, 0x1cca: 0xb3e9, 0x1ccb: 0xb401, + 0x1ccc: 0xb419, 0x1ccd: 0xb431, 0x1cce: 0xb2f9, 0x1ccf: 0xb389, 0x1cd0: 0xb3b9, 0x1cd1: 0xb329, + 0x1cd2: 0xb3d1, 0x1cd3: 0xb2c9, 0x1cd4: 0xb311, 0x1cd5: 0xb221, 0x1cd6: 0xb239, 0x1cd7: 0xb281, + 0x1cd8: 0xb2b1, 0x1cd9: 0xb341, 0x1cda: 0xb371, 0x1cdb: 0xb3a1, 0x1cdc: 0xbca9, 0x1cdd: 0x7999, + 0x1cde: 0xbcc1, 0x1cdf: 0xbcd9, 0x1ce0: 0x0040, 0x1ce1: 0xb1f1, 0x1ce2: 0xb251, 0x1ce3: 0x0040, + 0x1ce4: 0xb449, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb269, 0x1ce8: 0x0040, 0x1ce9: 0xb479, + 0x1cea: 0xb3e9, 0x1ceb: 0xb401, 0x1cec: 0xb419, 0x1ced: 0xb431, 0x1cee: 0xb2f9, 0x1cef: 0xb389, + 0x1cf0: 0xb3b9, 0x1cf1: 0xb329, 0x1cf2: 0xb3d1, 0x1cf3: 0x0040, 0x1cf4: 0xb311, 0x1cf5: 0xb221, + 0x1cf6: 0xb239, 0x1cf7: 0xb281, 0x1cf8: 0x0040, 0x1cf9: 0xb341, 0x1cfa: 0x0040, 0x1cfb: 0xb3a1, + 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0040, 0x1d01: 0x0040, 0x1d02: 0xb251, 0x1d03: 0x0040, 0x1d04: 0x0040, 0x1d05: 0x0040, + 0x1d06: 0x0040, 0x1d07: 0xb269, 0x1d08: 0x0040, 0x1d09: 0xb479, 0x1d0a: 0x0040, 0x1d0b: 0xb401, + 0x1d0c: 0x0040, 0x1d0d: 0xb431, 0x1d0e: 0xb2f9, 0x1d0f: 0xb389, 0x1d10: 0x0040, 0x1d11: 0xb329, + 0x1d12: 0xb3d1, 0x1d13: 0x0040, 0x1d14: 0xb311, 0x1d15: 0x0040, 0x1d16: 0x0040, 0x1d17: 0xb281, + 0x1d18: 0x0040, 0x1d19: 0xb341, 0x1d1a: 0x0040, 0x1d1b: 0xb3a1, 0x1d1c: 0x0040, 0x1d1d: 0x7999, + 0x1d1e: 0x0040, 0x1d1f: 0xbcd9, 0x1d20: 0x0040, 0x1d21: 0xb1f1, 0x1d22: 0xb251, 0x1d23: 0x0040, + 0x1d24: 0xb449, 0x1d25: 0x0040, 0x1d26: 0x0040, 0x1d27: 0xb269, 0x1d28: 0xb359, 0x1d29: 0xb479, + 0x1d2a: 0xb3e9, 0x1d2b: 0x0040, 0x1d2c: 0xb419, 0x1d2d: 0xb431, 0x1d2e: 0xb2f9, 
0x1d2f: 0xb389, + 0x1d30: 0xb3b9, 0x1d31: 0xb329, 0x1d32: 0xb3d1, 0x1d33: 0x0040, 0x1d34: 0xb311, 0x1d35: 0xb221, + 0x1d36: 0xb239, 0x1d37: 0xb281, 0x1d38: 0x0040, 0x1d39: 0xb341, 0x1d3a: 0xb371, 0x1d3b: 0xb3a1, + 0x1d3c: 0xbca9, 0x1d3d: 0x0040, 0x1d3e: 0xbcc1, 0x1d3f: 0x0040, + // Block 0x75, offset 0x1d40 + 0x1d40: 0xb1d9, 0x1d41: 0xb1f1, 0x1d42: 0xb251, 0x1d43: 0xb299, 0x1d44: 0xb449, 0x1d45: 0xb461, + 0x1d46: 0xb2e1, 0x1d47: 0xb269, 0x1d48: 0xb359, 0x1d49: 0xb479, 0x1d4a: 0x0040, 0x1d4b: 0xb401, + 0x1d4c: 0xb419, 0x1d4d: 0xb431, 0x1d4e: 0xb2f9, 0x1d4f: 0xb389, 0x1d50: 0xb3b9, 0x1d51: 0xb329, + 0x1d52: 0xb3d1, 0x1d53: 0xb2c9, 0x1d54: 0xb311, 0x1d55: 0xb221, 0x1d56: 0xb239, 0x1d57: 0xb281, + 0x1d58: 0xb2b1, 0x1d59: 0xb341, 0x1d5a: 0xb371, 0x1d5b: 0xb3a1, 0x1d5c: 0x0040, 0x1d5d: 0x0040, + 0x1d5e: 0x0040, 0x1d5f: 0x0040, 0x1d60: 0x0040, 0x1d61: 0xb1f1, 0x1d62: 0xb251, 0x1d63: 0xb299, + 0x1d64: 0x0040, 0x1d65: 0xb461, 0x1d66: 0xb2e1, 0x1d67: 0xb269, 0x1d68: 0xb359, 0x1d69: 0xb479, + 0x1d6a: 0x0040, 0x1d6b: 0xb401, 0x1d6c: 0xb419, 0x1d6d: 0xb431, 0x1d6e: 0xb2f9, 0x1d6f: 0xb389, + 0x1d70: 0xb3b9, 0x1d71: 0xb329, 0x1d72: 0xb3d1, 0x1d73: 0xb2c9, 0x1d74: 0xb311, 0x1d75: 0xb221, + 0x1d76: 0xb239, 0x1d77: 0xb281, 0x1d78: 0xb2b1, 0x1d79: 0xb341, 0x1d7a: 0xb371, 0x1d7b: 0xb3a1, + 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x0040, 0x1d81: 0xbcf2, 0x1d82: 0xbd0a, 0x1d83: 0xbd22, 0x1d84: 0xbd3a, 0x1d85: 0xbd52, + 0x1d86: 0xbd6a, 0x1d87: 0xbd82, 0x1d88: 0xbd9a, 0x1d89: 0xbdb2, 0x1d8a: 0xbdca, 0x1d8b: 0x0018, + 0x1d8c: 0x0018, 0x1d8d: 0x0018, 0x1d8e: 0x0018, 0x1d8f: 0x0018, 0x1d90: 0xbde2, 0x1d91: 0xbe02, + 0x1d92: 0xbe22, 0x1d93: 0xbe42, 0x1d94: 0xbe62, 0x1d95: 0xbe82, 0x1d96: 0xbea2, 0x1d97: 0xbec2, + 0x1d98: 0xbee2, 0x1d99: 0xbf02, 0x1d9a: 0xbf22, 0x1d9b: 0xbf42, 0x1d9c: 0xbf62, 0x1d9d: 0xbf82, + 0x1d9e: 0xbfa2, 0x1d9f: 0xbfc2, 0x1da0: 0xbfe2, 0x1da1: 0xc002, 0x1da2: 0xc022, 0x1da3: 0xc042, + 0x1da4: 0xc062, 0x1da5: 0xc082, 0x1da6: 0xc0a2, 0x1da7: 0xc0c2, 0x1da8: 0xc0e2, 0x1da9: 0xc102, + 0x1daa: 0xc121, 0x1dab: 0x1159, 0x1dac: 0x0269, 0x1dad: 0x66a9, 0x1dae: 0xc161, 0x1daf: 0x0018, + 0x1db0: 0x0039, 0x1db1: 0x0ee9, 0x1db2: 0x1159, 0x1db3: 0x0ef9, 0x1db4: 0x0f09, 0x1db5: 0x1199, + 0x1db6: 0x0f31, 0x1db7: 0x0249, 0x1db8: 0x0f41, 0x1db9: 0x0259, 0x1dba: 0x0f51, 0x1dbb: 0x0359, + 0x1dbc: 0x0f61, 0x1dbd: 0x0f71, 0x1dbe: 0x00d9, 0x1dbf: 0x0f99, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0x2039, 0x1dc1: 0x0269, 0x1dc2: 0x01d9, 0x1dc3: 0x0fa9, 0x1dc4: 0x0fb9, 0x1dc5: 0x1089, + 0x1dc6: 0x0279, 0x1dc7: 0x0369, 0x1dc8: 0x0289, 0x1dc9: 0x13d1, 0x1dca: 0xc179, 0x1dcb: 0x65e9, + 0x1dcc: 0xc191, 0x1dcd: 0x1441, 0x1dce: 0xc1a9, 0x1dcf: 0xc1c9, 0x1dd0: 0x0018, 0x1dd1: 0x0018, + 0x1dd2: 0x0018, 0x1dd3: 0x0018, 0x1dd4: 0x0018, 0x1dd5: 0x0018, 0x1dd6: 0x0018, 0x1dd7: 0x0018, + 0x1dd8: 0x0018, 0x1dd9: 0x0018, 0x1dda: 0x0018, 0x1ddb: 0x0018, 0x1ddc: 0x0018, 0x1ddd: 0x0018, + 0x1dde: 0x0018, 0x1ddf: 0x0018, 0x1de0: 0x0018, 0x1de1: 0x0018, 0x1de2: 0x0018, 0x1de3: 0x0018, + 0x1de4: 0x0018, 0x1de5: 0x0018, 0x1de6: 0x0018, 0x1de7: 0x0018, 0x1de8: 0x0018, 0x1de9: 0x0018, + 0x1dea: 0xc1e1, 0x1deb: 0xc1f9, 0x1dec: 0xc211, 0x1ded: 0x0018, 0x1dee: 0x0018, 0x1def: 0x0018, + 0x1df0: 0x0018, 0x1df1: 0x0018, 0x1df2: 0x0018, 0x1df3: 0x0018, 0x1df4: 0x0018, 0x1df5: 0x0018, + 0x1df6: 0x0018, 0x1df7: 0x0018, 0x1df8: 0x0018, 0x1df9: 0x0018, 0x1dfa: 0x0018, 0x1dfb: 0x0018, + 0x1dfc: 0x0018, 0x1dfd: 0x0018, 0x1dfe: 0x0018, 0x1dff: 0x0018, + // Block 0x78, offset 0x1e00 + 0x1e00: 
0xc241, 0x1e01: 0xc279, 0x1e02: 0xc2b1, 0x1e03: 0x0040, 0x1e04: 0x0040, 0x1e05: 0x0040, + 0x1e06: 0x0040, 0x1e07: 0x0040, 0x1e08: 0x0040, 0x1e09: 0x0040, 0x1e0a: 0x0040, 0x1e0b: 0x0040, + 0x1e0c: 0x0040, 0x1e0d: 0x0040, 0x1e0e: 0x0040, 0x1e0f: 0x0040, 0x1e10: 0xc2d1, 0x1e11: 0xc2f1, + 0x1e12: 0xc311, 0x1e13: 0xc331, 0x1e14: 0xc351, 0x1e15: 0xc371, 0x1e16: 0xc391, 0x1e17: 0xc3b1, + 0x1e18: 0xc3d1, 0x1e19: 0xc3f1, 0x1e1a: 0xc411, 0x1e1b: 0xc431, 0x1e1c: 0xc451, 0x1e1d: 0xc471, + 0x1e1e: 0xc491, 0x1e1f: 0xc4b1, 0x1e20: 0xc4d1, 0x1e21: 0xc4f1, 0x1e22: 0xc511, 0x1e23: 0xc531, + 0x1e24: 0xc551, 0x1e25: 0xc571, 0x1e26: 0xc591, 0x1e27: 0xc5b1, 0x1e28: 0xc5d1, 0x1e29: 0xc5f1, + 0x1e2a: 0xc611, 0x1e2b: 0xc631, 0x1e2c: 0xc651, 0x1e2d: 0xc671, 0x1e2e: 0xc691, 0x1e2f: 0xc6b1, + 0x1e30: 0xc6d1, 0x1e31: 0xc6f1, 0x1e32: 0xc711, 0x1e33: 0xc731, 0x1e34: 0xc751, 0x1e35: 0xc771, + 0x1e36: 0xc791, 0x1e37: 0xc7b1, 0x1e38: 0xc7d1, 0x1e39: 0xc7f1, 0x1e3a: 0xc811, 0x1e3b: 0xc831, + 0x1e3c: 0x0040, 0x1e3d: 0x0040, 0x1e3e: 0x0040, 0x1e3f: 0x0040, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xcb61, 0x1e41: 0xcb81, 0x1e42: 0xcba1, 0x1e43: 0x8b55, 0x1e44: 0xcbc1, 0x1e45: 0xcbe1, + 0x1e46: 0xcc01, 0x1e47: 0xcc21, 0x1e48: 0xcc41, 0x1e49: 0xcc61, 0x1e4a: 0xcc81, 0x1e4b: 0xcca1, + 0x1e4c: 0xccc1, 0x1e4d: 0x8b75, 0x1e4e: 0xcce1, 0x1e4f: 0xcd01, 0x1e50: 0xcd21, 0x1e51: 0xcd41, + 0x1e52: 0x8b95, 0x1e53: 0xcd61, 0x1e54: 0xcd81, 0x1e55: 0xc491, 0x1e56: 0x8bb5, 0x1e57: 0xcda1, + 0x1e58: 0xcdc1, 0x1e59: 0xcde1, 0x1e5a: 0xce01, 0x1e5b: 0xce21, 0x1e5c: 0x8bd5, 0x1e5d: 0xce41, + 0x1e5e: 0xce61, 0x1e5f: 0xce81, 0x1e60: 0xcea1, 0x1e61: 0xcec1, 0x1e62: 0xc7f1, 0x1e63: 0xcee1, + 0x1e64: 0xcf01, 0x1e65: 0xcf21, 0x1e66: 0xcf41, 0x1e67: 0xcf61, 0x1e68: 0xcf81, 0x1e69: 0xcfa1, + 0x1e6a: 0xcfc1, 0x1e6b: 0xcfe1, 0x1e6c: 0xd001, 0x1e6d: 0xd021, 0x1e6e: 0xd041, 0x1e6f: 0xd061, + 0x1e70: 0xd081, 0x1e71: 0xd0a1, 0x1e72: 0xd0a1, 0x1e73: 0xd0a1, 0x1e74: 0x8bf5, 0x1e75: 0xd0c1, + 0x1e76: 0xd0e1, 0x1e77: 0xd101, 0x1e78: 0x8c15, 0x1e79: 0xd121, 0x1e7a: 0xd141, 0x1e7b: 0xd161, + 0x1e7c: 0xd181, 0x1e7d: 0xd1a1, 0x1e7e: 0xd1c1, 0x1e7f: 0xd1e1, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd201, 0x1e81: 0xd221, 0x1e82: 0xd241, 0x1e83: 0xd261, 0x1e84: 0xd281, 0x1e85: 0xd2a1, + 0x1e86: 0xd2a1, 0x1e87: 0xd2c1, 0x1e88: 0xd2e1, 0x1e89: 0xd301, 0x1e8a: 0xd321, 0x1e8b: 0xd341, + 0x1e8c: 0xd361, 0x1e8d: 0xd381, 0x1e8e: 0xd3a1, 0x1e8f: 0xd3c1, 0x1e90: 0xd3e1, 0x1e91: 0xd401, + 0x1e92: 0xd421, 0x1e93: 0xd441, 0x1e94: 0xd461, 0x1e95: 0xd481, 0x1e96: 0xd4a1, 0x1e97: 0xd4c1, + 0x1e98: 0xd4e1, 0x1e99: 0x8c35, 0x1e9a: 0xd501, 0x1e9b: 0xd521, 0x1e9c: 0xd541, 0x1e9d: 0xc371, + 0x1e9e: 0xd561, 0x1e9f: 0xd581, 0x1ea0: 0x8c55, 0x1ea1: 0x8c75, 0x1ea2: 0xd5a1, 0x1ea3: 0xd5c1, + 0x1ea4: 0xd5e1, 0x1ea5: 0xd601, 0x1ea6: 0xd621, 0x1ea7: 0xd641, 0x1ea8: 0x2040, 0x1ea9: 0xd661, + 0x1eaa: 0xd681, 0x1eab: 0xd681, 0x1eac: 0x8c95, 0x1ead: 0xd6a1, 0x1eae: 0xd6c1, 0x1eaf: 0xd6e1, + 0x1eb0: 0xd701, 0x1eb1: 0x8cb5, 0x1eb2: 0xd721, 0x1eb3: 0xd741, 0x1eb4: 0x2040, 0x1eb5: 0xd761, + 0x1eb6: 0xd781, 0x1eb7: 0xd7a1, 0x1eb8: 0xd7c1, 0x1eb9: 0xd7e1, 0x1eba: 0xd801, 0x1ebb: 0x8cd5, + 0x1ebc: 0xd821, 0x1ebd: 0x8cf5, 0x1ebe: 0xd841, 0x1ebf: 0xd861, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xd881, 0x1ec1: 0xd8a1, 0x1ec2: 0xd8c1, 0x1ec3: 0xd8e1, 0x1ec4: 0xd901, 0x1ec5: 0xd921, + 0x1ec6: 0xd941, 0x1ec7: 0xd961, 0x1ec8: 0xd981, 0x1ec9: 0x8d15, 0x1eca: 0xd9a1, 0x1ecb: 0xd9c1, + 0x1ecc: 0xd9e1, 0x1ecd: 0xda01, 0x1ece: 0xda21, 0x1ecf: 0x8d35, 0x1ed0: 0xda41, 0x1ed1: 0x8d55, + 0x1ed2: 0x8d75, 0x1ed3: 0xda61, 
0x1ed4: 0xda81, 0x1ed5: 0xda81, 0x1ed6: 0xdaa1, 0x1ed7: 0x8d95, + 0x1ed8: 0x8db5, 0x1ed9: 0xdac1, 0x1eda: 0xdae1, 0x1edb: 0xdb01, 0x1edc: 0xdb21, 0x1edd: 0xdb41, + 0x1ede: 0xdb61, 0x1edf: 0xdb81, 0x1ee0: 0xdba1, 0x1ee1: 0xdbc1, 0x1ee2: 0xdbe1, 0x1ee3: 0xdc01, + 0x1ee4: 0x8dd5, 0x1ee5: 0xdc21, 0x1ee6: 0xdc41, 0x1ee7: 0xdc61, 0x1ee8: 0xdc81, 0x1ee9: 0xdc61, + 0x1eea: 0xdca1, 0x1eeb: 0xdcc1, 0x1eec: 0xdce1, 0x1eed: 0xdd01, 0x1eee: 0xdd21, 0x1eef: 0xdd41, + 0x1ef0: 0xdd61, 0x1ef1: 0xdd81, 0x1ef2: 0xdda1, 0x1ef3: 0xddc1, 0x1ef4: 0xdde1, 0x1ef5: 0xde01, + 0x1ef6: 0xde21, 0x1ef7: 0xde41, 0x1ef8: 0x8df5, 0x1ef9: 0xde61, 0x1efa: 0xde81, 0x1efb: 0xdea1, + 0x1efc: 0xdec1, 0x1efd: 0xdee1, 0x1efe: 0x8e15, 0x1eff: 0xdf01, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xe601, 0x1f01: 0xe621, 0x1f02: 0xe641, 0x1f03: 0xe661, 0x1f04: 0xe681, 0x1f05: 0xe6a1, + 0x1f06: 0x8f35, 0x1f07: 0xe6c1, 0x1f08: 0xe6e1, 0x1f09: 0xe701, 0x1f0a: 0xe721, 0x1f0b: 0xe741, + 0x1f0c: 0xe761, 0x1f0d: 0x8f55, 0x1f0e: 0xe781, 0x1f0f: 0xe7a1, 0x1f10: 0x8f75, 0x1f11: 0x8f95, + 0x1f12: 0xe7c1, 0x1f13: 0xe7e1, 0x1f14: 0xe801, 0x1f15: 0xe821, 0x1f16: 0xe841, 0x1f17: 0xe861, + 0x1f18: 0xe881, 0x1f19: 0xe8a1, 0x1f1a: 0xe8c1, 0x1f1b: 0x8fb5, 0x1f1c: 0xe8e1, 0x1f1d: 0x8fd5, + 0x1f1e: 0xe901, 0x1f1f: 0x2040, 0x1f20: 0xe921, 0x1f21: 0xe941, 0x1f22: 0xe961, 0x1f23: 0x8ff5, + 0x1f24: 0xe981, 0x1f25: 0xe9a1, 0x1f26: 0x9015, 0x1f27: 0x9035, 0x1f28: 0xe9c1, 0x1f29: 0xe9e1, + 0x1f2a: 0xea01, 0x1f2b: 0xea21, 0x1f2c: 0xea41, 0x1f2d: 0xea41, 0x1f2e: 0xea61, 0x1f2f: 0xea81, + 0x1f30: 0xeaa1, 0x1f31: 0xeac1, 0x1f32: 0xeae1, 0x1f33: 0xeb01, 0x1f34: 0xeb21, 0x1f35: 0x9055, + 0x1f36: 0xeb41, 0x1f37: 0x9075, 0x1f38: 0xeb61, 0x1f39: 0x9095, 0x1f3a: 0xeb81, 0x1f3b: 0x90b5, + 0x1f3c: 0x90d5, 0x1f3d: 0x90f5, 0x1f3e: 0xeba1, 0x1f3f: 0xebc1, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xebe1, 0x1f41: 0x9115, 0x1f42: 0x9135, 0x1f43: 0x9155, 0x1f44: 0x9175, 0x1f45: 0xec01, + 0x1f46: 0xec21, 0x1f47: 0xec21, 0x1f48: 0xec41, 0x1f49: 0xec61, 0x1f4a: 0xec81, 0x1f4b: 0xeca1, + 0x1f4c: 0xecc1, 0x1f4d: 0x9195, 0x1f4e: 0xece1, 0x1f4f: 0xed01, 0x1f50: 0xed21, 0x1f51: 0xed41, + 0x1f52: 0x91b5, 0x1f53: 0xed61, 0x1f54: 0x91d5, 0x1f55: 0x91f5, 0x1f56: 0xed81, 0x1f57: 0xeda1, + 0x1f58: 0xedc1, 0x1f59: 0xede1, 0x1f5a: 0xee01, 0x1f5b: 0xee21, 0x1f5c: 0x9215, 0x1f5d: 0x9235, + 0x1f5e: 0x9255, 0x1f5f: 0x2040, 0x1f60: 0xee41, 0x1f61: 0x9275, 0x1f62: 0xee61, 0x1f63: 0xee81, + 0x1f64: 0xeea1, 0x1f65: 0x9295, 0x1f66: 0xeec1, 0x1f67: 0xeee1, 0x1f68: 0xef01, 0x1f69: 0xef21, + 0x1f6a: 0xef41, 0x1f6b: 0x92b5, 0x1f6c: 0xef61, 0x1f6d: 0xef81, 0x1f6e: 0xefa1, 0x1f6f: 0xefc1, + 0x1f70: 0xefe1, 0x1f71: 0xf001, 0x1f72: 0x92d5, 0x1f73: 0x92f5, 0x1f74: 0xf021, 0x1f75: 0x9315, + 0x1f76: 0xf041, 0x1f77: 0x9335, 0x1f78: 0xf061, 0x1f79: 0xf081, 0x1f7a: 0xf0a1, 0x1f7b: 0x9355, + 0x1f7c: 0x9375, 0x1f7d: 0xf0c1, 0x1f7e: 0x9395, 0x1f7f: 0xf0e1, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xf721, 0x1f81: 0xf741, 0x1f82: 0xf761, 0x1f83: 0xf781, 0x1f84: 0xf7a1, 0x1f85: 0x9555, + 0x1f86: 0xf7c1, 0x1f87: 0xf7e1, 0x1f88: 0xf801, 0x1f89: 0xf821, 0x1f8a: 0xf841, 0x1f8b: 0x9575, + 0x1f8c: 0x9595, 0x1f8d: 0xf861, 0x1f8e: 0xf881, 0x1f8f: 0xf8a1, 0x1f90: 0xf8c1, 0x1f91: 0xf8e1, + 0x1f92: 0xf901, 0x1f93: 0x95b5, 0x1f94: 0xf921, 0x1f95: 0xf941, 0x1f96: 0xf961, 0x1f97: 0xf981, + 0x1f98: 0x95d5, 0x1f99: 0x95f5, 0x1f9a: 0xf9a1, 0x1f9b: 0xf9c1, 0x1f9c: 0xf9e1, 0x1f9d: 0x9615, + 0x1f9e: 0xfa01, 0x1f9f: 0xfa21, 0x1fa0: 0x684d, 0x1fa1: 0x9635, 0x1fa2: 0xfa41, 0x1fa3: 0xfa61, + 0x1fa4: 0xfa81, 0x1fa5: 0x9655, 0x1fa6: 0xfaa1, 0x1fa7: 
0xfac1, 0x1fa8: 0xfae1, 0x1fa9: 0xfb01, + 0x1faa: 0xfb21, 0x1fab: 0xfb41, 0x1fac: 0xfb61, 0x1fad: 0x9675, 0x1fae: 0xfb81, 0x1faf: 0xfba1, + 0x1fb0: 0xfbc1, 0x1fb1: 0x9695, 0x1fb2: 0xfbe1, 0x1fb3: 0xfc01, 0x1fb4: 0xfc21, 0x1fb5: 0xfc41, + 0x1fb6: 0x7b6d, 0x1fb7: 0x96b5, 0x1fb8: 0xfc61, 0x1fb9: 0xfc81, 0x1fba: 0xfca1, 0x1fbb: 0x96d5, + 0x1fbc: 0xfcc1, 0x1fbd: 0x96f5, 0x1fbe: 0xfce1, 0x1fbf: 0xfce1, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0xfd01, 0x1fc1: 0x9715, 0x1fc2: 0xfd21, 0x1fc3: 0xfd41, 0x1fc4: 0xfd61, 0x1fc5: 0xfd81, + 0x1fc6: 0xfda1, 0x1fc7: 0xfdc1, 0x1fc8: 0xfde1, 0x1fc9: 0x9735, 0x1fca: 0xfe01, 0x1fcb: 0xfe21, + 0x1fcc: 0xfe41, 0x1fcd: 0xfe61, 0x1fce: 0xfe81, 0x1fcf: 0xfea1, 0x1fd0: 0x9755, 0x1fd1: 0xfec1, + 0x1fd2: 0x9775, 0x1fd3: 0x9795, 0x1fd4: 0x97b5, 0x1fd5: 0xfee1, 0x1fd6: 0xff01, 0x1fd7: 0xff21, + 0x1fd8: 0xff41, 0x1fd9: 0xff61, 0x1fda: 0xff81, 0x1fdb: 0xffa1, 0x1fdc: 0xffc1, 0x1fdd: 0x97d5, + 0x1fde: 0x0040, 0x1fdf: 0x0040, 0x1fe0: 0x0040, 0x1fe1: 0x0040, 0x1fe2: 0x0040, 0x1fe3: 0x0040, + 0x1fe4: 0x0040, 0x1fe5: 0x0040, 0x1fe6: 0x0040, 0x1fe7: 0x0040, 0x1fe8: 0x0040, 0x1fe9: 0x0040, + 0x1fea: 0x0040, 0x1feb: 0x0040, 0x1fec: 0x0040, 0x1fed: 0x0040, 0x1fee: 0x0040, 0x1fef: 0x0040, + 0x1ff0: 0x0040, 0x1ff1: 0x0040, 0x1ff2: 0x0040, 0x1ff3: 0x0040, 0x1ff4: 0x0040, 0x1ff5: 0x0040, + 0x1ff6: 0x0040, 0x1ff7: 0x0040, 0x1ff8: 0x0040, 0x1ff9: 0x0040, 0x1ffa: 0x0040, 0x1ffb: 0x0040, + 0x1ffc: 0x0040, 0x1ffd: 0x0040, 0x1ffe: 0x0040, 0x1fff: 0x0040, +} + +// idnaIndex: 37 blocks, 2368 entries, 4736 bytes +// Block 0 is the zero block. +var idnaIndex = [2368]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7e, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7f, 0xca: 0x80, 0xcb: 0x07, 0xcc: 0x81, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x82, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x83, 0xd6: 0x84, 0xd7: 0x85, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x86, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x87, 0xde: 0x88, 0xdf: 0x89, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1e, 0xf1: 0x1f, 0xf2: 0x1f, 0xf3: 0x21, 0xf4: 0x22, + // Block 0x4, offset 0x100 + 0x120: 0x8a, 0x121: 0x13, 0x122: 0x8b, 0x123: 0x8c, 0x124: 0x8d, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8e, + 0x130: 0x8f, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x90, 0x135: 0x21, 0x136: 0x91, 0x137: 0x92, + 0x138: 0x93, 0x139: 0x94, 0x13a: 0x22, 0x13b: 0x95, 0x13c: 0x96, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x97, + // Block 0x5, offset 0x140 + 0x140: 0x98, 0x141: 0x99, 0x142: 0x9a, 0x143: 0x9b, 0x144: 0x9c, 0x145: 0x9d, 0x146: 0x9e, 0x147: 0x9f, + 0x148: 0xa0, 0x149: 0xa1, 0x14a: 0xa2, 0x14b: 0xa3, 0x14c: 0xa4, 0x14d: 0xa5, 0x14e: 0xa6, 0x14f: 0xa7, + 0x150: 0xa8, 0x151: 0xa0, 0x152: 0xa0, 0x153: 0xa0, 0x154: 0xa0, 0x155: 0xa0, 0x156: 0xa0, 0x157: 0xa0, + 0x158: 0xa0, 0x159: 0xa9, 0x15a: 0xaa, 0x15b: 0xab, 0x15c: 0xac, 0x15d: 0xad, 0x15e: 0xae, 0x15f: 0xaf, + 0x160: 0xb0, 0x161: 0xb1, 0x162: 0xb2, 0x163: 0xb3, 0x164: 0xb4, 0x165: 0xb5, 0x166: 0xb6, 0x167: 0xb7, + 0x168: 0xb8, 0x169: 0xb9, 0x16a: 0xba, 0x16b: 0xbb, 0x16c: 0xbc, 0x16d: 0xbd, 0x16e: 0xbe, 0x16f: 0xbf, + 0x170: 0xc0, 0x171: 0xc1, 0x172: 0xc2, 0x173: 0xc3, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 
0xc4, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc5, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc6, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc7, 0x187: 0x9c, + 0x188: 0xc8, 0x189: 0xc9, 0x18a: 0x9c, 0x18b: 0x9c, 0x18c: 0xca, 0x18d: 0x9c, 0x18e: 0x9c, 0x18f: 0x9c, + 0x190: 0xcb, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9c, 0x195: 0x9c, 0x196: 0x9c, 0x197: 0x9c, + 0x198: 0x9c, 0x199: 0x9c, 0x19a: 0x9c, 0x19b: 0x9c, 0x19c: 0x9c, 0x19d: 0x9c, 0x19e: 0x9c, 0x19f: 0x9c, + 0x1a0: 0x9c, 0x1a1: 0x9c, 0x1a2: 0x9c, 0x1a3: 0x9c, 0x1a4: 0x9c, 0x1a5: 0x9c, 0x1a6: 0x9c, 0x1a7: 0x9c, + 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9c, 0x1ab: 0xce, 0x1ac: 0x9c, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0x9c, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0xa0, 0x1d1: 0xa0, 0x1d2: 0xa0, 0x1d3: 0xa0, 0x1d4: 0xa0, 0x1d5: 0xa0, 0x1d6: 0xa0, 0x1d7: 0xa0, + 0x1d8: 0xa0, 0x1d9: 0xa0, 0x1da: 0xa0, 0x1db: 0xa0, 0x1dc: 0xa0, 0x1dd: 0xa0, 0x1de: 0xa0, 0x1df: 0xa0, + 0x1e0: 0xa0, 0x1e1: 0xa0, 0x1e2: 0xa0, 0x1e3: 0xa0, 0x1e4: 0xa0, 0x1e5: 0xa0, 0x1e6: 0xa0, 0x1e7: 0xa0, + 0x1e8: 0xa0, 0x1e9: 0xa0, 0x1ea: 0xa0, 0x1eb: 0xa0, 0x1ec: 0xa0, 0x1ed: 0xa0, 0x1ee: 0xa0, 0x1ef: 0xa0, + 0x1f0: 0xa0, 0x1f1: 0xa0, 0x1f2: 0xa0, 0x1f3: 0xa0, 0x1f4: 0xa0, 0x1f5: 0xa0, 0x1f6: 0xa0, 0x1f7: 0xa0, + 0x1f8: 0xa0, 0x1f9: 0xa0, 0x1fa: 0xa0, 0x1fb: 0xa0, 0x1fc: 0xa0, 0x1fd: 0xa0, 0x1fe: 0xa0, 0x1ff: 0xa0, + // Block 0x8, offset 0x200 + 0x200: 0xa0, 0x201: 0xa0, 0x202: 0xa0, 0x203: 0xa0, 0x204: 0xa0, 0x205: 0xa0, 0x206: 0xa0, 0x207: 0xa0, + 0x208: 0xa0, 0x209: 0xa0, 0x20a: 0xa0, 0x20b: 0xa0, 0x20c: 0xa0, 0x20d: 0xa0, 0x20e: 0xa0, 0x20f: 0xa0, + 0x210: 0xa0, 0x211: 0xa0, 0x212: 0xa0, 0x213: 0xa0, 0x214: 0xa0, 0x215: 0xa0, 0x216: 0xa0, 0x217: 0xa0, + 0x218: 0xa0, 0x219: 0xa0, 0x21a: 0xa0, 0x21b: 0xa0, 0x21c: 0xa0, 0x21d: 0xa0, 0x21e: 0xa0, 0x21f: 0xa0, + 0x220: 0xa0, 0x221: 0xa0, 0x222: 0xa0, 0x223: 0xa0, 0x224: 0xa0, 0x225: 0xa0, 0x226: 0xa0, 0x227: 0xa0, + 0x228: 0xa0, 0x229: 0xa0, 0x22a: 0xa0, 0x22b: 0xa0, 0x22c: 0xa0, 0x22d: 0xa0, 0x22e: 0xa0, 0x22f: 0xa0, + 0x230: 0xa0, 0x231: 0xa0, 0x232: 0xa0, 0x233: 0xa0, 0x234: 0xa0, 0x235: 0xa0, 0x236: 0xa0, 0x237: 0x9c, + 0x238: 0xa0, 0x239: 0xa0, 0x23a: 0xa0, 0x23b: 0xa0, 0x23c: 0xa0, 0x23d: 0xa0, 0x23e: 0xa0, 0x23f: 0xa0, + // Block 0x9, offset 0x240 + 0x240: 0xa0, 0x241: 0xa0, 0x242: 0xa0, 0x243: 0xa0, 0x244: 0xa0, 0x245: 0xa0, 0x246: 0xa0, 0x247: 0xa0, + 0x248: 0xa0, 0x249: 0xa0, 0x24a: 0xa0, 0x24b: 0xa0, 0x24c: 0xa0, 0x24d: 0xa0, 0x24e: 0xa0, 0x24f: 0xa0, + 0x250: 0xa0, 0x251: 0xa0, 0x252: 0xa0, 0x253: 0xa0, 0x254: 0xa0, 0x255: 0xa0, 0x256: 0xa0, 0x257: 0xa0, + 0x258: 0xa0, 0x259: 0xa0, 0x25a: 0xa0, 0x25b: 0xa0, 0x25c: 0xa0, 0x25d: 0xa0, 0x25e: 0xa0, 0x25f: 0xa0, + 0x260: 0xa0, 0x261: 0xa0, 0x262: 0xa0, 0x263: 0xa0, 0x264: 0xa0, 0x265: 0xa0, 0x266: 0xa0, 0x267: 0xa0, + 0x268: 0xa0, 0x269: 0xa0, 0x26a: 0xa0, 0x26b: 0xa0, 0x26c: 0xa0, 0x26d: 0xa0, 0x26e: 0xa0, 0x26f: 0xa0, + 0x270: 0xa0, 0x271: 0xa0, 0x272: 0xa0, 0x273: 0xa0, 0x274: 0xa0, 0x275: 0xa0, 0x276: 0xa0, 0x277: 0xa0, + 0x278: 0xa0, 0x279: 0xa0, 0x27a: 0xa0, 
0x27b: 0xa0, 0x27c: 0xa0, 0x27d: 0xa0, 0x27e: 0xa0, 0x27f: 0xa0, + // Block 0xa, offset 0x280 + 0x280: 0xa0, 0x281: 0xa0, 0x282: 0xa0, 0x283: 0xa0, 0x284: 0xa0, 0x285: 0xa0, 0x286: 0xa0, 0x287: 0xa0, + 0x288: 0xa0, 0x289: 0xa0, 0x28a: 0xa0, 0x28b: 0xa0, 0x28c: 0xa0, 0x28d: 0xa0, 0x28e: 0xa0, 0x28f: 0xa0, + 0x290: 0xa0, 0x291: 0xa0, 0x292: 0xa0, 0x293: 0xa0, 0x294: 0xa0, 0x295: 0xa0, 0x296: 0xa0, 0x297: 0xa0, + 0x298: 0xa0, 0x299: 0xa0, 0x29a: 0xa0, 0x29b: 0xa0, 0x29c: 0xa0, 0x29d: 0xa0, 0x29e: 0xa0, 0x29f: 0xa0, + 0x2a0: 0xa0, 0x2a1: 0xa0, 0x2a2: 0xa0, 0x2a3: 0xa0, 0x2a4: 0xa0, 0x2a5: 0xa0, 0x2a6: 0xa0, 0x2a7: 0xa0, + 0x2a8: 0xa0, 0x2a9: 0xa0, 0x2aa: 0xa0, 0x2ab: 0xa0, 0x2ac: 0xa0, 0x2ad: 0xa0, 0x2ae: 0xa0, 0x2af: 0xa0, + 0x2b0: 0xa0, 0x2b1: 0xa0, 0x2b2: 0xa0, 0x2b3: 0xa0, 0x2b4: 0xa0, 0x2b5: 0xa0, 0x2b6: 0xa0, 0x2b7: 0xa0, + 0x2b8: 0xa0, 0x2b9: 0xa0, 0x2ba: 0xa0, 0x2bb: 0xa0, 0x2bc: 0xa0, 0x2bd: 0xa0, 0x2be: 0xa0, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xa0, 0x2c1: 0xa0, 0x2c2: 0xa0, 0x2c3: 0xa0, 0x2c4: 0xa0, 0x2c5: 0xa0, 0x2c6: 0xa0, 0x2c7: 0xa0, + 0x2c8: 0xa0, 0x2c9: 0xa0, 0x2ca: 0xa0, 0x2cb: 0xa0, 0x2cc: 0xa0, 0x2cd: 0xa0, 0x2ce: 0xa0, 0x2cf: 0xa0, + 0x2d0: 0xa0, 0x2d1: 0xa0, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0xa0, 0x2d5: 0xa0, 0x2d6: 0xa0, 0x2d7: 0xa0, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0xa0, 0x2f1: 0xa0, 0x2f2: 0xa0, 0x2f3: 0xa0, 0x2f4: 0xa0, 0x2f5: 0xa0, 0x2f6: 0xa0, 0x2f7: 0xa0, + 0x2f8: 0xa0, 0x2f9: 0xa0, 0x2fa: 0xa0, 0x2fb: 0xa0, 0x2fc: 0xa0, 0x2fd: 0xa0, 0x2fe: 0xa0, 0x2ff: 0xa0, + // Block 0xc, offset 0x300 + 0x300: 0xa0, 0x301: 0xa0, 0x302: 0xa0, 0x303: 0xa0, 0x304: 0xa0, 0x305: 0xa0, 0x306: 0xa0, 0x307: 0xa0, + 0x308: 0xa0, 0x309: 0xa0, 0x30a: 0xa0, 0x30b: 0xa0, 0x30c: 0xa0, 0x30d: 0xa0, 0x30e: 0xa0, 0x30f: 0xa0, + 0x310: 0xa0, 0x311: 0xa0, 0x312: 0xa0, 0x313: 0xa0, 0x314: 0xa0, 0x315: 0xa0, 0x316: 0xa0, 0x317: 0xa0, + 0x318: 0xa0, 0x319: 0xa0, 0x31a: 0xa0, 0x31b: 0xa0, 0x31c: 0xa0, 0x31d: 0xa0, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xfb, 0x341: 0xfb, 0x342: 0xfb, 0x343: 0xfb, 0x344: 0xfb, 0x345: 0xfb, 0x346: 0xfb, 0x347: 0xfb, + 0x348: 0xfb, 0x349: 0xfb, 0x34a: 0xfb, 0x34b: 0xfb, 0x34c: 0xfb, 0x34d: 0xfb, 0x34e: 0xfb, 0x34f: 0xfb, + 0x350: 0xfb, 0x351: 0xfb, 0x352: 0xfb, 0x353: 0xfb, 0x354: 0xfb, 0x355: 0xfb, 0x356: 0xfb, 0x357: 0xfb, + 0x358: 0xfb, 0x359: 0xfb, 0x35a: 0xfb, 0x35b: 0xfb, 0x35c: 0xfb, 0x35d: 0xfb, 0x35e: 0xfb, 0x35f: 0xfb, + 0x360: 0xfb, 0x361: 0xfb, 0x362: 0xfb, 0x363: 0xfb, 0x364: 0xfb, 0x365: 0xfb, 0x366: 0xfb, 0x367: 0xfb, + 0x368: 0xfb, 0x369: 0xfb, 0x36a: 0xfb, 0x36b: 0xfb, 0x36c: 0xfb, 0x36d: 0xfb, 0x36e: 0xfb, 0x36f: 0xfb, + 0x370: 0xfb, 0x371: 0xfb, 0x372: 0xfb, 0x373: 0xfb, 0x374: 0xfb, 0x375: 0xfb, 0x376: 0xfb, 0x377: 0xfb, + 0x378: 0xfb, 0x379: 0xfb, 0x37a: 0xfb, 0x37b: 0xfb, 0x37c: 0xfb, 0x37d: 0xfb, 0x37e: 0xfb, 0x37f: 0xfb, + // Block 0xe, offset 0x380 + 0x380: 0xfb, 0x381: 0xfb, 0x382: 0xfb, 0x383: 0xfb, 0x384: 0xfb, 0x385: 0xfb, 0x386: 0xfb, 0x387: 0xfb, + 0x388: 0xfb, 0x389: 0xfb, 0x38a: 0xfb, 0x38b: 0xfb, 0x38c: 0xfb, 0x38d: 0xfb, 0x38e: 0xfb, 0x38f: 0xfb, + 0x390: 0xfb, 0x391: 0xfb, 0x392: 0xfb, 0x393: 0xfb, 0x394: 0xfb, 0x395: 0xfb, 0x396: 0xfb, 0x397: 0xfb, + 0x398: 0xfb, 0x399: 0xfb, 0x39a: 0xfb, 0x39b: 0xfb, 
0x39c: 0xfb, 0x39d: 0xfb, 0x39e: 0xfb, 0x39f: 0xfb, + 0x3a0: 0xfb, 0x3a1: 0xfb, 0x3a2: 0xfb, 0x3a3: 0xfb, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, + 0x3a8: 0x47, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x102, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x103, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0xa0, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9c, 0x3c6: 0x108, 0x3c7: 0x109, + 0x3c8: 0xfb, 0x3c9: 0xfb, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, + 0x3d0: 0x110, 0x3d1: 0xa0, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xfb, 0x3d7: 0xfb, + 0x3d8: 0xa0, 0x3d9: 0xa0, 0x3da: 0xa0, 0x3db: 0xa0, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xfb, 0x3df: 0xfb, + 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xfb, 0x3e6: 0x11c, 0x3e7: 0x11d, + 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5b, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5c, 0x3ef: 0xfb, + 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0x127, 0x3f5: 0xfb, 0x3f6: 0xfb, 0x3f7: 0xfb, + 0x3f8: 0xfb, 0x3f9: 0x128, 0x3fa: 0x129, 0x3fb: 0xfb, 0x3fc: 0x12a, 0x3fd: 0x12b, 0x3fe: 0x12c, 0x3ff: 0x12d, + // Block 0x10, offset 0x400 + 0x400: 0x12e, 0x401: 0x12f, 0x402: 0x130, 0x403: 0x131, 0x404: 0x132, 0x405: 0x133, 0x406: 0x134, 0x407: 0x135, + 0x408: 0x136, 0x409: 0xfb, 0x40a: 0x137, 0x40b: 0x138, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xfb, 0x40f: 0xfb, + 0x410: 0x139, 0x411: 0x13a, 0x412: 0x13b, 0x413: 0x13c, 0x414: 0xfb, 0x415: 0xfb, 0x416: 0x13d, 0x417: 0x13e, + 0x418: 0x13f, 0x419: 0x140, 0x41a: 0x141, 0x41b: 0x142, 0x41c: 0x143, 0x41d: 0xfb, 0x41e: 0xfb, 0x41f: 0xfb, + 0x420: 0x144, 0x421: 0xfb, 0x422: 0x145, 0x423: 0x146, 0x424: 0x5f, 0x425: 0x147, 0x426: 0x148, 0x427: 0x149, + 0x428: 0x14a, 0x429: 0x14b, 0x42a: 0x14c, 0x42b: 0x14d, 0x42c: 0xfb, 0x42d: 0xfb, 0x42e: 0xfb, 0x42f: 0xfb, + 0x430: 0x14e, 0x431: 0x14f, 0x432: 0x150, 0x433: 0xfb, 0x434: 0x151, 0x435: 0x152, 0x436: 0x153, 0x437: 0xfb, + 0x438: 0xfb, 0x439: 0xfb, 0x43a: 0xfb, 0x43b: 0x154, 0x43c: 0xfb, 0x43d: 0xfb, 0x43e: 0x155, 0x43f: 0x156, + // Block 0x11, offset 0x440 + 0x440: 0xa0, 0x441: 0xa0, 0x442: 0xa0, 0x443: 0xa0, 0x444: 0xa0, 0x445: 0xa0, 0x446: 0xa0, 0x447: 0xa0, + 0x448: 0xa0, 0x449: 0xa0, 0x44a: 0xa0, 0x44b: 0xa0, 0x44c: 0xa0, 0x44d: 0xa0, 0x44e: 0x157, 0x44f: 0xfb, + 0x450: 0x9c, 0x451: 0x158, 0x452: 0xa0, 0x453: 0xa0, 0x454: 0xa0, 0x455: 0x159, 0x456: 0xfb, 0x457: 0xfb, + 0x458: 0xfb, 0x459: 0xfb, 0x45a: 0xfb, 0x45b: 0xfb, 0x45c: 0xfb, 0x45d: 0xfb, 0x45e: 0xfb, 0x45f: 0xfb, + 0x460: 0xfb, 0x461: 0xfb, 0x462: 0xfb, 0x463: 0xfb, 0x464: 0xfb, 0x465: 0xfb, 0x466: 0xfb, 0x467: 0xfb, + 0x468: 0xfb, 0x469: 0xfb, 0x46a: 0xfb, 0x46b: 0xfb, 0x46c: 0xfb, 0x46d: 0xfb, 0x46e: 0xfb, 0x46f: 0xfb, + 0x470: 0xfb, 0x471: 0xfb, 0x472: 0xfb, 0x473: 0xfb, 0x474: 0xfb, 0x475: 0xfb, 0x476: 0xfb, 0x477: 0xfb, + 0x478: 0xfb, 0x479: 0xfb, 0x47a: 0xfb, 0x47b: 0xfb, 0x47c: 0xfb, 0x47d: 0xfb, 0x47e: 0xfb, 0x47f: 0xfb, + // Block 0x12, offset 0x480 + 0x480: 0xa0, 0x481: 0xa0, 0x482: 0xa0, 0x483: 0xa0, 0x484: 0xa0, 0x485: 0xa0, 0x486: 0xa0, 0x487: 0xa0, + 0x488: 0xa0, 0x489: 0xa0, 0x48a: 0xa0, 0x48b: 0xa0, 0x48c: 0xa0, 0x48d: 0xa0, 0x48e: 0xa0, 0x48f: 0xa0, + 0x490: 0x15a, 0x491: 0xfb, 0x492: 0xfb, 0x493: 0xfb, 0x494: 0xfb, 0x495: 0xfb, 0x496: 0xfb, 0x497: 0xfb, + 
0x498: 0xfb, 0x499: 0xfb, 0x49a: 0xfb, 0x49b: 0xfb, 0x49c: 0xfb, 0x49d: 0xfb, 0x49e: 0xfb, 0x49f: 0xfb, + 0x4a0: 0xfb, 0x4a1: 0xfb, 0x4a2: 0xfb, 0x4a3: 0xfb, 0x4a4: 0xfb, 0x4a5: 0xfb, 0x4a6: 0xfb, 0x4a7: 0xfb, + 0x4a8: 0xfb, 0x4a9: 0xfb, 0x4aa: 0xfb, 0x4ab: 0xfb, 0x4ac: 0xfb, 0x4ad: 0xfb, 0x4ae: 0xfb, 0x4af: 0xfb, + 0x4b0: 0xfb, 0x4b1: 0xfb, 0x4b2: 0xfb, 0x4b3: 0xfb, 0x4b4: 0xfb, 0x4b5: 0xfb, 0x4b6: 0xfb, 0x4b7: 0xfb, + 0x4b8: 0xfb, 0x4b9: 0xfb, 0x4ba: 0xfb, 0x4bb: 0xfb, 0x4bc: 0xfb, 0x4bd: 0xfb, 0x4be: 0xfb, 0x4bf: 0xfb, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xfb, 0x4c1: 0xfb, 0x4c2: 0xfb, 0x4c3: 0xfb, 0x4c4: 0xfb, 0x4c5: 0xfb, 0x4c6: 0xfb, 0x4c7: 0xfb, + 0x4c8: 0xfb, 0x4c9: 0xfb, 0x4ca: 0xfb, 0x4cb: 0xfb, 0x4cc: 0xfb, 0x4cd: 0xfb, 0x4ce: 0xfb, 0x4cf: 0xfb, + 0x4d0: 0xa0, 0x4d1: 0xa0, 0x4d2: 0xa0, 0x4d3: 0xa0, 0x4d4: 0xa0, 0x4d5: 0xa0, 0x4d6: 0xa0, 0x4d7: 0xa0, + 0x4d8: 0xa0, 0x4d9: 0x15b, 0x4da: 0xfb, 0x4db: 0xfb, 0x4dc: 0xfb, 0x4dd: 0xfb, 0x4de: 0xfb, 0x4df: 0xfb, + 0x4e0: 0xfb, 0x4e1: 0xfb, 0x4e2: 0xfb, 0x4e3: 0xfb, 0x4e4: 0xfb, 0x4e5: 0xfb, 0x4e6: 0xfb, 0x4e7: 0xfb, + 0x4e8: 0xfb, 0x4e9: 0xfb, 0x4ea: 0xfb, 0x4eb: 0xfb, 0x4ec: 0xfb, 0x4ed: 0xfb, 0x4ee: 0xfb, 0x4ef: 0xfb, + 0x4f0: 0xfb, 0x4f1: 0xfb, 0x4f2: 0xfb, 0x4f3: 0xfb, 0x4f4: 0xfb, 0x4f5: 0xfb, 0x4f6: 0xfb, 0x4f7: 0xfb, + 0x4f8: 0xfb, 0x4f9: 0xfb, 0x4fa: 0xfb, 0x4fb: 0xfb, 0x4fc: 0xfb, 0x4fd: 0xfb, 0x4fe: 0xfb, 0x4ff: 0xfb, + // Block 0x14, offset 0x500 + 0x500: 0xfb, 0x501: 0xfb, 0x502: 0xfb, 0x503: 0xfb, 0x504: 0xfb, 0x505: 0xfb, 0x506: 0xfb, 0x507: 0xfb, + 0x508: 0xfb, 0x509: 0xfb, 0x50a: 0xfb, 0x50b: 0xfb, 0x50c: 0xfb, 0x50d: 0xfb, 0x50e: 0xfb, 0x50f: 0xfb, + 0x510: 0xfb, 0x511: 0xfb, 0x512: 0xfb, 0x513: 0xfb, 0x514: 0xfb, 0x515: 0xfb, 0x516: 0xfb, 0x517: 0xfb, + 0x518: 0xfb, 0x519: 0xfb, 0x51a: 0xfb, 0x51b: 0xfb, 0x51c: 0xfb, 0x51d: 0xfb, 0x51e: 0xfb, 0x51f: 0xfb, + 0x520: 0xa0, 0x521: 0xa0, 0x522: 0xa0, 0x523: 0xa0, 0x524: 0xa0, 0x525: 0xa0, 0x526: 0xa0, 0x527: 0xa0, + 0x528: 0x14d, 0x529: 0x15c, 0x52a: 0xfb, 0x52b: 0x15d, 0x52c: 0x15e, 0x52d: 0x15f, 0x52e: 0x160, 0x52f: 0xfb, + 0x530: 0xfb, 0x531: 0xfb, 0x532: 0xfb, 0x533: 0xfb, 0x534: 0xfb, 0x535: 0xfb, 0x536: 0xfb, 0x537: 0xfb, + 0x538: 0xfb, 0x539: 0x161, 0x53a: 0x162, 0x53b: 0xfb, 0x53c: 0xa0, 0x53d: 0x163, 0x53e: 0x164, 0x53f: 0x165, + // Block 0x15, offset 0x540 + 0x540: 0xa0, 0x541: 0xa0, 0x542: 0xa0, 0x543: 0xa0, 0x544: 0xa0, 0x545: 0xa0, 0x546: 0xa0, 0x547: 0xa0, + 0x548: 0xa0, 0x549: 0xa0, 0x54a: 0xa0, 0x54b: 0xa0, 0x54c: 0xa0, 0x54d: 0xa0, 0x54e: 0xa0, 0x54f: 0xa0, + 0x550: 0xa0, 0x551: 0xa0, 0x552: 0xa0, 0x553: 0xa0, 0x554: 0xa0, 0x555: 0xa0, 0x556: 0xa0, 0x557: 0xa0, + 0x558: 0xa0, 0x559: 0xa0, 0x55a: 0xa0, 0x55b: 0xa0, 0x55c: 0xa0, 0x55d: 0xa0, 0x55e: 0xa0, 0x55f: 0x166, + 0x560: 0xa0, 0x561: 0xa0, 0x562: 0xa0, 0x563: 0xa0, 0x564: 0xa0, 0x565: 0xa0, 0x566: 0xa0, 0x567: 0xa0, + 0x568: 0xa0, 0x569: 0xa0, 0x56a: 0xa0, 0x56b: 0xa0, 0x56c: 0xa0, 0x56d: 0xa0, 0x56e: 0xa0, 0x56f: 0xa0, + 0x570: 0xa0, 0x571: 0xa0, 0x572: 0xa0, 0x573: 0x167, 0x574: 0x168, 0x575: 0xfb, 0x576: 0xfb, 0x577: 0xfb, + 0x578: 0xfb, 0x579: 0xfb, 0x57a: 0xfb, 0x57b: 0xfb, 0x57c: 0xfb, 0x57d: 0xfb, 0x57e: 0xfb, 0x57f: 0xfb, + // Block 0x16, offset 0x580 + 0x580: 0xa0, 0x581: 0xa0, 0x582: 0xa0, 0x583: 0xa0, 0x584: 0x169, 0x585: 0x16a, 0x586: 0xa0, 0x587: 0xa0, + 0x588: 0xa0, 0x589: 0xa0, 0x58a: 0xa0, 0x58b: 0x16b, 0x58c: 0xfb, 0x58d: 0xfb, 0x58e: 0xfb, 0x58f: 0xfb, + 0x590: 0xfb, 0x591: 0xfb, 0x592: 0xfb, 0x593: 0xfb, 0x594: 0xfb, 0x595: 0xfb, 0x596: 0xfb, 0x597: 0xfb, + 0x598: 0xfb, 0x599: 
0xfb, 0x59a: 0xfb, 0x59b: 0xfb, 0x59c: 0xfb, 0x59d: 0xfb, 0x59e: 0xfb, 0x59f: 0xfb, + 0x5a0: 0xfb, 0x5a1: 0xfb, 0x5a2: 0xfb, 0x5a3: 0xfb, 0x5a4: 0xfb, 0x5a5: 0xfb, 0x5a6: 0xfb, 0x5a7: 0xfb, + 0x5a8: 0xfb, 0x5a9: 0xfb, 0x5aa: 0xfb, 0x5ab: 0xfb, 0x5ac: 0xfb, 0x5ad: 0xfb, 0x5ae: 0xfb, 0x5af: 0xfb, + 0x5b0: 0xa0, 0x5b1: 0x16c, 0x5b2: 0x16d, 0x5b3: 0xfb, 0x5b4: 0xfb, 0x5b5: 0xfb, 0x5b6: 0xfb, 0x5b7: 0xfb, + 0x5b8: 0xfb, 0x5b9: 0xfb, 0x5ba: 0xfb, 0x5bb: 0xfb, 0x5bc: 0xfb, 0x5bd: 0xfb, 0x5be: 0xfb, 0x5bf: 0xfb, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9c, 0x5c1: 0x9c, 0x5c2: 0x9c, 0x5c3: 0x16e, 0x5c4: 0x16f, 0x5c5: 0x170, 0x5c6: 0x171, 0x5c7: 0x172, + 0x5c8: 0x9c, 0x5c9: 0x173, 0x5ca: 0xfb, 0x5cb: 0x174, 0x5cc: 0x9c, 0x5cd: 0x175, 0x5ce: 0xfb, 0x5cf: 0xfb, + 0x5d0: 0x60, 0x5d1: 0x61, 0x5d2: 0x62, 0x5d3: 0x63, 0x5d4: 0x64, 0x5d5: 0x65, 0x5d6: 0x66, 0x5d7: 0x67, + 0x5d8: 0x68, 0x5d9: 0x69, 0x5da: 0x6a, 0x5db: 0x6b, 0x5dc: 0x6c, 0x5dd: 0x6d, 0x5de: 0x6e, 0x5df: 0x6f, + 0x5e0: 0x9c, 0x5e1: 0x9c, 0x5e2: 0x9c, 0x5e3: 0x9c, 0x5e4: 0x9c, 0x5e5: 0x9c, 0x5e6: 0x9c, 0x5e7: 0x9c, + 0x5e8: 0x176, 0x5e9: 0x177, 0x5ea: 0x178, 0x5eb: 0xfb, 0x5ec: 0xfb, 0x5ed: 0xfb, 0x5ee: 0xfb, 0x5ef: 0xfb, + 0x5f0: 0xfb, 0x5f1: 0xfb, 0x5f2: 0xfb, 0x5f3: 0xfb, 0x5f4: 0xfb, 0x5f5: 0xfb, 0x5f6: 0xfb, 0x5f7: 0xfb, + 0x5f8: 0xfb, 0x5f9: 0xfb, 0x5fa: 0xfb, 0x5fb: 0xfb, 0x5fc: 0xfb, 0x5fd: 0xfb, 0x5fe: 0xfb, 0x5ff: 0xfb, + // Block 0x18, offset 0x600 + 0x600: 0x179, 0x601: 0xfb, 0x602: 0xfb, 0x603: 0xfb, 0x604: 0x17a, 0x605: 0x17b, 0x606: 0xfb, 0x607: 0xfb, + 0x608: 0xfb, 0x609: 0xfb, 0x60a: 0xfb, 0x60b: 0x17c, 0x60c: 0xfb, 0x60d: 0xfb, 0x60e: 0xfb, 0x60f: 0xfb, + 0x610: 0xfb, 0x611: 0xfb, 0x612: 0xfb, 0x613: 0xfb, 0x614: 0xfb, 0x615: 0xfb, 0x616: 0xfb, 0x617: 0xfb, + 0x618: 0xfb, 0x619: 0xfb, 0x61a: 0xfb, 0x61b: 0xfb, 0x61c: 0xfb, 0x61d: 0xfb, 0x61e: 0xfb, 0x61f: 0xfb, + 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x17d, 0x624: 0x70, 0x625: 0x17e, 0x626: 0xfb, 0x627: 0xfb, + 0x628: 0xfb, 0x629: 0xfb, 0x62a: 0xfb, 0x62b: 0xfb, 0x62c: 0xfb, 0x62d: 0xfb, 0x62e: 0xfb, 0x62f: 0xfb, + 0x630: 0xfb, 0x631: 0x17f, 0x632: 0x180, 0x633: 0xfb, 0x634: 0x181, 0x635: 0xfb, 0x636: 0xfb, 0x637: 0xfb, + 0x638: 0x71, 0x639: 0x72, 0x63a: 0x73, 0x63b: 0x182, 0x63c: 0xfb, 0x63d: 0xfb, 0x63e: 0xfb, 0x63f: 0xfb, + // Block 0x19, offset 0x640 + 0x640: 0x183, 0x641: 0x9c, 0x642: 0x184, 0x643: 0x185, 0x644: 0x74, 0x645: 0x75, 0x646: 0x186, 0x647: 0x187, + 0x648: 0x76, 0x649: 0x188, 0x64a: 0xfb, 0x64b: 0xfb, 0x64c: 0x9c, 0x64d: 0x9c, 0x64e: 0x9c, 0x64f: 0x9c, + 0x650: 0x9c, 0x651: 0x9c, 0x652: 0x9c, 0x653: 0x9c, 0x654: 0x9c, 0x655: 0x9c, 0x656: 0x9c, 0x657: 0x9c, + 0x658: 0x9c, 0x659: 0x9c, 0x65a: 0x9c, 0x65b: 0x189, 0x65c: 0x9c, 0x65d: 0x18a, 0x65e: 0x9c, 0x65f: 0x18b, + 0x660: 0x18c, 0x661: 0x18d, 0x662: 0x18e, 0x663: 0xfb, 0x664: 0x9c, 0x665: 0x18f, 0x666: 0x9c, 0x667: 0x190, + 0x668: 0x9c, 0x669: 0x191, 0x66a: 0x192, 0x66b: 0x193, 0x66c: 0x9c, 0x66d: 0x9c, 0x66e: 0x194, 0x66f: 0x195, + 0x670: 0xfb, 0x671: 0xfb, 0x672: 0xfb, 0x673: 0xfb, 0x674: 0xfb, 0x675: 0xfb, 0x676: 0xfb, 0x677: 0xfb, + 0x678: 0xfb, 0x679: 0xfb, 0x67a: 0xfb, 0x67b: 0xfb, 0x67c: 0xfb, 0x67d: 0xfb, 0x67e: 0xfb, 0x67f: 0xfb, + // Block 0x1a, offset 0x680 + 0x680: 0xa0, 0x681: 0xa0, 0x682: 0xa0, 0x683: 0xa0, 0x684: 0xa0, 0x685: 0xa0, 0x686: 0xa0, 0x687: 0xa0, + 0x688: 0xa0, 0x689: 0xa0, 0x68a: 0xa0, 0x68b: 0xa0, 0x68c: 0xa0, 0x68d: 0xa0, 0x68e: 0xa0, 0x68f: 0xa0, + 0x690: 0xa0, 0x691: 0xa0, 0x692: 0xa0, 0x693: 0xa0, 0x694: 0xa0, 0x695: 0xa0, 0x696: 0xa0, 0x697: 0xa0, + 0x698: 0xa0, 
0x699: 0xa0, 0x69a: 0xa0, 0x69b: 0x196, 0x69c: 0xa0, 0x69d: 0xa0, 0x69e: 0xa0, 0x69f: 0xa0, + 0x6a0: 0xa0, 0x6a1: 0xa0, 0x6a2: 0xa0, 0x6a3: 0xa0, 0x6a4: 0xa0, 0x6a5: 0xa0, 0x6a6: 0xa0, 0x6a7: 0xa0, + 0x6a8: 0xa0, 0x6a9: 0xa0, 0x6aa: 0xa0, 0x6ab: 0xa0, 0x6ac: 0xa0, 0x6ad: 0xa0, 0x6ae: 0xa0, 0x6af: 0xa0, + 0x6b0: 0xa0, 0x6b1: 0xa0, 0x6b2: 0xa0, 0x6b3: 0xa0, 0x6b4: 0xa0, 0x6b5: 0xa0, 0x6b6: 0xa0, 0x6b7: 0xa0, + 0x6b8: 0xa0, 0x6b9: 0xa0, 0x6ba: 0xa0, 0x6bb: 0xa0, 0x6bc: 0xa0, 0x6bd: 0xa0, 0x6be: 0xa0, 0x6bf: 0xa0, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0xa0, 0x6c1: 0xa0, 0x6c2: 0xa0, 0x6c3: 0xa0, 0x6c4: 0xa0, 0x6c5: 0xa0, 0x6c6: 0xa0, 0x6c7: 0xa0, + 0x6c8: 0xa0, 0x6c9: 0xa0, 0x6ca: 0xa0, 0x6cb: 0xa0, 0x6cc: 0xa0, 0x6cd: 0xa0, 0x6ce: 0xa0, 0x6cf: 0xa0, + 0x6d0: 0xa0, 0x6d1: 0xa0, 0x6d2: 0xa0, 0x6d3: 0xa0, 0x6d4: 0xa0, 0x6d5: 0xa0, 0x6d6: 0xa0, 0x6d7: 0xa0, + 0x6d8: 0xa0, 0x6d9: 0xa0, 0x6da: 0xa0, 0x6db: 0xa0, 0x6dc: 0x197, 0x6dd: 0xa0, 0x6de: 0xa0, 0x6df: 0xa0, + 0x6e0: 0x198, 0x6e1: 0xa0, 0x6e2: 0xa0, 0x6e3: 0xa0, 0x6e4: 0xa0, 0x6e5: 0xa0, 0x6e6: 0xa0, 0x6e7: 0xa0, + 0x6e8: 0xa0, 0x6e9: 0xa0, 0x6ea: 0xa0, 0x6eb: 0xa0, 0x6ec: 0xa0, 0x6ed: 0xa0, 0x6ee: 0xa0, 0x6ef: 0xa0, + 0x6f0: 0xa0, 0x6f1: 0xa0, 0x6f2: 0xa0, 0x6f3: 0xa0, 0x6f4: 0xa0, 0x6f5: 0xa0, 0x6f6: 0xa0, 0x6f7: 0xa0, + 0x6f8: 0xa0, 0x6f9: 0xa0, 0x6fa: 0xa0, 0x6fb: 0xa0, 0x6fc: 0xa0, 0x6fd: 0xa0, 0x6fe: 0xa0, 0x6ff: 0xa0, + // Block 0x1c, offset 0x700 + 0x700: 0xa0, 0x701: 0xa0, 0x702: 0xa0, 0x703: 0xa0, 0x704: 0xa0, 0x705: 0xa0, 0x706: 0xa0, 0x707: 0xa0, + 0x708: 0xa0, 0x709: 0xa0, 0x70a: 0xa0, 0x70b: 0xa0, 0x70c: 0xa0, 0x70d: 0xa0, 0x70e: 0xa0, 0x70f: 0xa0, + 0x710: 0xa0, 0x711: 0xa0, 0x712: 0xa0, 0x713: 0xa0, 0x714: 0xa0, 0x715: 0xa0, 0x716: 0xa0, 0x717: 0xa0, + 0x718: 0xa0, 0x719: 0xa0, 0x71a: 0xa0, 0x71b: 0xa0, 0x71c: 0xa0, 0x71d: 0xa0, 0x71e: 0xa0, 0x71f: 0xa0, + 0x720: 0xa0, 0x721: 0xa0, 0x722: 0xa0, 0x723: 0xa0, 0x724: 0xa0, 0x725: 0xa0, 0x726: 0xa0, 0x727: 0xa0, + 0x728: 0xa0, 0x729: 0xa0, 0x72a: 0xa0, 0x72b: 0xa0, 0x72c: 0xa0, 0x72d: 0xa0, 0x72e: 0xa0, 0x72f: 0xa0, + 0x730: 0xa0, 0x731: 0xa0, 0x732: 0xa0, 0x733: 0xa0, 0x734: 0xa0, 0x735: 0xa0, 0x736: 0xa0, 0x737: 0xa0, + 0x738: 0xa0, 0x739: 0xa0, 0x73a: 0x199, 0x73b: 0xa0, 0x73c: 0xa0, 0x73d: 0xa0, 0x73e: 0xa0, 0x73f: 0xa0, + // Block 0x1d, offset 0x740 + 0x740: 0xa0, 0x741: 0xa0, 0x742: 0xa0, 0x743: 0xa0, 0x744: 0xa0, 0x745: 0xa0, 0x746: 0xa0, 0x747: 0xa0, + 0x748: 0xa0, 0x749: 0xa0, 0x74a: 0xa0, 0x74b: 0xa0, 0x74c: 0xa0, 0x74d: 0xa0, 0x74e: 0xa0, 0x74f: 0xa0, + 0x750: 0xa0, 0x751: 0xa0, 0x752: 0xa0, 0x753: 0xa0, 0x754: 0xa0, 0x755: 0xa0, 0x756: 0xa0, 0x757: 0xa0, + 0x758: 0xa0, 0x759: 0xa0, 0x75a: 0xa0, 0x75b: 0xa0, 0x75c: 0xa0, 0x75d: 0xa0, 0x75e: 0xa0, 0x75f: 0xa0, + 0x760: 0xa0, 0x761: 0xa0, 0x762: 0xa0, 0x763: 0xa0, 0x764: 0xa0, 0x765: 0xa0, 0x766: 0xa0, 0x767: 0xa0, + 0x768: 0xa0, 0x769: 0xa0, 0x76a: 0xa0, 0x76b: 0xa0, 0x76c: 0xa0, 0x76d: 0xa0, 0x76e: 0xa0, 0x76f: 0x19a, + 0x770: 0xfb, 0x771: 0xfb, 0x772: 0xfb, 0x773: 0xfb, 0x774: 0xfb, 0x775: 0xfb, 0x776: 0xfb, 0x777: 0xfb, + 0x778: 0xfb, 0x779: 0xfb, 0x77a: 0xfb, 0x77b: 0xfb, 0x77c: 0xfb, 0x77d: 0xfb, 0x77e: 0xfb, 0x77f: 0xfb, + // Block 0x1e, offset 0x780 + 0x780: 0xfb, 0x781: 0xfb, 0x782: 0xfb, 0x783: 0xfb, 0x784: 0xfb, 0x785: 0xfb, 0x786: 0xfb, 0x787: 0xfb, + 0x788: 0xfb, 0x789: 0xfb, 0x78a: 0xfb, 0x78b: 0xfb, 0x78c: 0xfb, 0x78d: 0xfb, 0x78e: 0xfb, 0x78f: 0xfb, + 0x790: 0xfb, 0x791: 0xfb, 0x792: 0xfb, 0x793: 0xfb, 0x794: 0xfb, 0x795: 0xfb, 0x796: 0xfb, 0x797: 0xfb, + 0x798: 0xfb, 0x799: 0xfb, 0x79a: 0xfb, 0x79b: 
0xfb, 0x79c: 0xfb, 0x79d: 0xfb, 0x79e: 0xfb, 0x79f: 0xfb, + 0x7a0: 0x77, 0x7a1: 0x78, 0x7a2: 0x79, 0x7a3: 0x19b, 0x7a4: 0x7a, 0x7a5: 0x7b, 0x7a6: 0x19c, 0x7a7: 0x7c, + 0x7a8: 0x7d, 0x7a9: 0xfb, 0x7aa: 0xfb, 0x7ab: 0xfb, 0x7ac: 0xfb, 0x7ad: 0xfb, 0x7ae: 0xfb, 0x7af: 0xfb, + 0x7b0: 0xfb, 0x7b1: 0xfb, 0x7b2: 0xfb, 0x7b3: 0xfb, 0x7b4: 0xfb, 0x7b5: 0xfb, 0x7b6: 0xfb, 0x7b7: 0xfb, + 0x7b8: 0xfb, 0x7b9: 0xfb, 0x7ba: 0xfb, 0x7bb: 0xfb, 0x7bc: 0xfb, 0x7bd: 0xfb, 0x7be: 0xfb, 0x7bf: 0xfb, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0xa0, 0x7c1: 0xa0, 0x7c2: 0xa0, 0x7c3: 0xa0, 0x7c4: 0xa0, 0x7c5: 0xa0, 0x7c6: 0xa0, 0x7c7: 0xa0, + 0x7c8: 0xa0, 0x7c9: 0xa0, 0x7ca: 0xa0, 0x7cb: 0xa0, 0x7cc: 0xa0, 0x7cd: 0x19d, 0x7ce: 0xfb, 0x7cf: 0xfb, + 0x7d0: 0xfb, 0x7d1: 0xfb, 0x7d2: 0xfb, 0x7d3: 0xfb, 0x7d4: 0xfb, 0x7d5: 0xfb, 0x7d6: 0xfb, 0x7d7: 0xfb, + 0x7d8: 0xfb, 0x7d9: 0xfb, 0x7da: 0xfb, 0x7db: 0xfb, 0x7dc: 0xfb, 0x7dd: 0xfb, 0x7de: 0xfb, 0x7df: 0xfb, + 0x7e0: 0xfb, 0x7e1: 0xfb, 0x7e2: 0xfb, 0x7e3: 0xfb, 0x7e4: 0xfb, 0x7e5: 0xfb, 0x7e6: 0xfb, 0x7e7: 0xfb, + 0x7e8: 0xfb, 0x7e9: 0xfb, 0x7ea: 0xfb, 0x7eb: 0xfb, 0x7ec: 0xfb, 0x7ed: 0xfb, 0x7ee: 0xfb, 0x7ef: 0xfb, + 0x7f0: 0xfb, 0x7f1: 0xfb, 0x7f2: 0xfb, 0x7f3: 0xfb, 0x7f4: 0xfb, 0x7f5: 0xfb, 0x7f6: 0xfb, 0x7f7: 0xfb, + 0x7f8: 0xfb, 0x7f9: 0xfb, 0x7fa: 0xfb, 0x7fb: 0xfb, 0x7fc: 0xfb, 0x7fd: 0xfb, 0x7fe: 0xfb, 0x7ff: 0xfb, + // Block 0x20, offset 0x800 + 0x810: 0x0d, 0x811: 0x0e, 0x812: 0x0f, 0x813: 0x10, 0x814: 0x11, 0x815: 0x0b, 0x816: 0x12, 0x817: 0x07, + 0x818: 0x13, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x14, 0x81c: 0x0b, 0x81d: 0x15, 0x81e: 0x16, 0x81f: 0x17, + 0x820: 0x07, 0x821: 0x07, 0x822: 0x07, 0x823: 0x07, 0x824: 0x07, 0x825: 0x07, 0x826: 0x07, 0x827: 0x07, + 0x828: 0x07, 0x829: 0x07, 0x82a: 0x18, 0x82b: 0x19, 0x82c: 0x1a, 0x82d: 0x07, 0x82e: 0x1b, 0x82f: 0x1c, + 0x830: 0x07, 0x831: 0x1d, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x0b, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x19e, 0x881: 0x19f, 0x882: 0xfb, 0x883: 0xfb, 0x884: 0x1a0, 0x885: 0x1a0, 0x886: 0x1a0, 0x887: 0x1a1, + 0x888: 0xfb, 0x889: 0xfb, 0x88a: 0xfb, 0x88b: 0xfb, 0x88c: 0xfb, 0x88d: 0xfb, 0x88e: 0xfb, 0x88f: 0xfb, + 0x890: 0xfb, 0x891: 0xfb, 0x892: 0xfb, 0x893: 0xfb, 0x894: 0xfb, 0x895: 0xfb, 0x896: 0xfb, 0x897: 0xfb, + 0x898: 0xfb, 0x899: 0xfb, 0x89a: 0xfb, 0x89b: 0xfb, 0x89c: 0xfb, 0x89d: 0xfb, 0x89e: 0xfb, 0x89f: 0xfb, + 0x8a0: 0xfb, 0x8a1: 0xfb, 0x8a2: 0xfb, 0x8a3: 0xfb, 0x8a4: 0xfb, 0x8a5: 0xfb, 0x8a6: 0xfb, 0x8a7: 0xfb, + 0x8a8: 0xfb, 0x8a9: 0xfb, 0x8aa: 0xfb, 0x8ab: 0xfb, 0x8ac: 0xfb, 0x8ad: 0xfb, 
0x8ae: 0xfb, 0x8af: 0xfb, + 0x8b0: 0xfb, 0x8b1: 0xfb, 0x8b2: 0xfb, 0x8b3: 0xfb, 0x8b4: 0xfb, 0x8b5: 0xfb, 0x8b6: 0xfb, 0x8b7: 0xfb, + 0x8b8: 0xfb, 0x8b9: 0xfb, 0x8ba: 0xfb, 0x8bb: 0xfb, 0x8bc: 0xfb, 0x8bd: 0xfb, 0x8be: 0xfb, 0x8bf: 0xfb, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, + 0x8d0: 0x0b, 0x8d1: 0x0b, 0x8d2: 0x0b, 0x8d3: 0x0b, 0x8d4: 0x0b, 0x8d5: 0x0b, 0x8d6: 0x0b, 0x8d7: 0x0b, + 0x8d8: 0x0b, 0x8d9: 0x0b, 0x8da: 0x0b, 0x8db: 0x0b, 0x8dc: 0x0b, 0x8dd: 0x0b, 0x8de: 0x0b, 0x8df: 0x0b, + 0x8e0: 0x20, 0x8e1: 0x0b, 0x8e2: 0x0b, 0x8e3: 0x0b, 0x8e4: 0x0b, 0x8e5: 0x0b, 0x8e6: 0x0b, 0x8e7: 0x0b, + 0x8e8: 0x0b, 0x8e9: 0x0b, 0x8ea: 0x0b, 0x8eb: 0x0b, 0x8ec: 0x0b, 0x8ed: 0x0b, 0x8ee: 0x0b, 0x8ef: 0x0b, + 0x8f0: 0x0b, 0x8f1: 0x0b, 0x8f2: 0x0b, 0x8f3: 0x0b, 0x8f4: 0x0b, 0x8f5: 0x0b, 0x8f6: 0x0b, 0x8f7: 0x0b, + 0x8f8: 0x0b, 0x8f9: 0x0b, 0x8fa: 0x0b, 0x8fb: 0x0b, 0x8fc: 0x0b, 0x8fd: 0x0b, 0x8fe: 0x0b, 0x8ff: 0x0b, + // Block 0x24, offset 0x900 + 0x900: 0x0b, 0x901: 0x0b, 0x902: 0x0b, 0x903: 0x0b, 0x904: 0x0b, 0x905: 0x0b, 0x906: 0x0b, 0x907: 0x0b, + 0x908: 0x0b, 0x909: 0x0b, 0x90a: 0x0b, 0x90b: 0x0b, 0x90c: 0x0b, 0x90d: 0x0b, 0x90e: 0x0b, 0x90f: 0x0b, +} + +// idnaSparseOffset: 292 entries, 584 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x85, 0x8b, 0x94, 0xa4, 0xb2, 0xbd, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x225, 0x22f, 0x23b, 0x247, 0x253, 0x25b, 0x260, 0x26d, 0x27e, 0x282, 0x28d, 0x291, 0x29a, 0x2a2, 0x2a8, 0x2ad, 0x2b0, 0x2b4, 0x2ba, 0x2be, 0x2c2, 0x2c6, 0x2cc, 0x2d4, 0x2db, 0x2e6, 0x2f0, 0x2f4, 0x2f7, 0x2fd, 0x301, 0x303, 0x306, 0x308, 0x30b, 0x315, 0x318, 0x327, 0x32b, 0x330, 0x333, 0x337, 0x33c, 0x341, 0x347, 0x358, 0x368, 0x36e, 0x372, 0x381, 0x386, 0x38e, 0x398, 0x3a3, 0x3ab, 0x3bc, 0x3c5, 0x3d5, 0x3e2, 0x3ee, 0x3f3, 0x400, 0x404, 0x409, 0x40b, 0x40d, 0x411, 0x413, 0x417, 0x420, 0x426, 0x42a, 0x43a, 0x444, 0x449, 0x44c, 0x452, 0x459, 0x45e, 0x462, 0x468, 0x46d, 0x476, 0x47b, 0x481, 0x488, 0x48f, 0x496, 0x49a, 0x49f, 0x4a2, 0x4a7, 0x4b3, 0x4b9, 0x4be, 0x4c5, 0x4cd, 0x4d2, 0x4d6, 0x4e6, 0x4ed, 0x4f1, 0x4f5, 0x4fc, 0x4fe, 0x501, 0x504, 0x508, 0x511, 0x515, 0x51d, 0x525, 0x52d, 0x539, 0x545, 0x54b, 0x554, 0x560, 0x567, 0x570, 0x57b, 0x582, 0x591, 0x59e, 0x5ab, 0x5b4, 0x5b8, 0x5c7, 0x5cf, 0x5da, 0x5e3, 0x5e9, 0x5f1, 0x5fa, 0x605, 0x608, 0x614, 0x61d, 0x620, 0x625, 0x62e, 0x633, 0x640, 0x64b, 0x654, 0x65e, 0x661, 0x66b, 0x674, 0x680, 0x68d, 0x69a, 0x6a8, 0x6af, 0x6b3, 0x6b7, 0x6ba, 0x6bf, 0x6c2, 0x6c7, 0x6ca, 0x6d1, 0x6d8, 0x6dc, 0x6e7, 0x6ea, 0x6ed, 0x6f0, 0x6f6, 0x6fc, 0x705, 0x708, 0x70b, 0x70e, 0x711, 0x718, 0x71b, 0x720, 0x72a, 0x72d, 0x731, 0x740, 0x74c, 0x750, 0x755, 0x759, 0x75e, 0x762, 0x767, 0x770, 0x77b, 0x781, 0x787, 0x78d, 0x793, 0x79c, 0x79f, 0x7a2, 0x7a6, 0x7aa, 0x7ae, 0x7b4, 0x7ba, 0x7bf, 0x7c2, 0x7d2, 0x7d9, 0x7dc, 0x7e1, 0x7e5, 0x7eb, 0x7f2, 0x7f6, 0x7fa, 0x803, 0x80a, 0x80f, 0x813, 0x821, 0x824, 0x827, 0x82b, 0x82f, 0x832, 0x842, 0x853, 0x856, 0x85b, 0x85d, 0x85f} + +// idnaSparseValues: 2146 entries, 8584 bytes +var idnaSparseValues = [2146]valueRange{ + // 
Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, 
hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x05}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x94 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa4 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + 
{value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xca + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xdb + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, 
hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x111 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12b + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x143 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x145 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x14a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14d + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x150 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + 
{value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x169 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x171 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x182 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x187 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x199 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1af + {value: 
0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b5 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x34, offset 0x1d3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1db + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1de + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1eb + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f7 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fe + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, 
lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x206 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x222 + {value: 0x0000, lo: 0x02}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0x3e, offset 0x225 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22f + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23b + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x247 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x253 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x25b + {value: 0x0000, lo: 0x04}, 
+ {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x260 + {value: 0x0000, lo: 0x0c}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x059d, lo: 0x90, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x059d, lo: 0xbd, hi: 0xbf}, + // Block 0x45, offset 0x26d + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x46, offset 0x27e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x282 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x28d + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x291 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x29a + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x2a2 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a8 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09dd, lo: 0xa9, hi: 0xa9}, + {value: 0x09fd, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2ad + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2b0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, 
lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2b4 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2ba + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2be + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2c2 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0xbf}, + // Block 0x53, offset 0x2c6 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ebd, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2cc + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x56, offset 0x2db + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2e6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x58, offset 0x2f0 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xbf}, + // Block 0x5a, offset 0x2f7 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0ef5, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x2fd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0f15, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0020, lo: 0x01}, + {value: 0x0f35, lo: 0x80, hi: 0xbf}, + // Block 0x5d, offset 0x303 + {value: 0x0020, lo: 0x02}, + {value: 0x1735, lo: 0x80, hi: 
0x8f}, + {value: 0x1915, lo: 0x90, hi: 0xbf}, + // Block 0x5e, offset 0x306 + {value: 0x0020, lo: 0x01}, + {value: 0x1f15, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x308 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x60, offset 0x30b + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x61, offset 0x315 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x318 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a35, lo: 0xb1, hi: 0xb1}, + {value: 0x2a55, lo: 0xb2, hi: 0xb2}, + {value: 0x2a75, lo: 0xb3, hi: 0xb3}, + {value: 0x2a95, lo: 0xb4, hi: 0xb4}, + {value: 0x2a75, lo: 0xb5, hi: 0xb5}, + {value: 0x2ab5, lo: 0xb6, hi: 0xb6}, + {value: 0x2ad5, lo: 0xb7, hi: 0xb7}, + {value: 0x2af5, lo: 0xb8, hi: 0xb9}, + {value: 0x2b15, lo: 0xba, hi: 0xbb}, + {value: 0x2b35, lo: 0xbc, hi: 0xbd}, + {value: 0x2b15, lo: 0xbe, hi: 0xbf}, + // Block 0x63, offset 0x327 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x65, offset 0x330 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x66, offset 0x333 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x33c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x69, offset 0x341 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6a, offset 0x347 + {value: 0x0000, lo: 0x10}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0xe00d, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x83}, + {value: 0x03f5, lo: 0x84, hi: 0x84}, + {value: 0x1329, lo: 0x85, hi: 0x85}, + {value: 0x447d, lo: 0x86, hi: 0x86}, + {value: 0xe07d, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0xe01d, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xb4}, + {value: 0xe01d, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6ec1, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6b, offset 0x358 + {value: 0x0000, 
lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x3b08, lo: 0xac, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x368 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x36e + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6e, offset 0x372 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x381 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x70, offset 0x386 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x71, offset 0x38e + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x398 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x3a3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x74, offset 0x3ab + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 
0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x75, offset 0x3bc + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3c5 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3d5 + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x78, offset 0x3e2 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x449d, lo: 0x9c, hi: 0x9c}, + {value: 0x44b5, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa8}, + {value: 0x6ed9, lo: 0xa9, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x44cd, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3ee + {value: 0x0000, lo: 0x04}, + {value: 0x44ed, lo: 0x80, hi: 0x8f}, + {value: 0x450d, lo: 0x90, hi: 0x9f}, + {value: 0x452d, lo: 0xa0, hi: 0xaf}, + {value: 0x450d, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3f3 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7b, offset 0x400 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x404 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 
0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7d, offset 0x409 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x7e, offset 0x40b + {value: 0x0020, lo: 0x01}, + {value: 0x454d, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x40d + {value: 0x0020, lo: 0x03}, + {value: 0x4d4d, lo: 0x80, hi: 0x94}, + {value: 0x4b0d, lo: 0x95, hi: 0x95}, + {value: 0x4fed, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x411 + {value: 0x0020, lo: 0x01}, + {value: 0x552d, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x413 + {value: 0x0020, lo: 0x03}, + {value: 0x5d2d, lo: 0x80, hi: 0x84}, + {value: 0x568d, lo: 0x85, hi: 0x85}, + {value: 0x5dcd, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x417 + {value: 0x0020, lo: 0x08}, + {value: 0x6b8d, lo: 0x80, hi: 0x8f}, + {value: 0x6d4d, lo: 0x90, hi: 0x90}, + {value: 0x6d8d, lo: 0x91, hi: 0xab}, + {value: 0x6ef1, lo: 0xac, hi: 0xac}, + {value: 0x70ed, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x710d, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x420 + {value: 0x0020, lo: 0x05}, + {value: 0x730d, lo: 0x80, hi: 0xad}, + {value: 0x656d, lo: 0xae, hi: 0xae}, + {value: 0x78cd, lo: 0xaf, hi: 0xb5}, + {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, + {value: 0x79ad, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x426 + {value: 0x0028, lo: 0x03}, + {value: 0x7c71, lo: 0x80, hi: 0x82}, + {value: 0x7c31, lo: 0x83, hi: 0x83}, + {value: 0x7ce9, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x42a + {value: 0x0038, lo: 0x0f}, + {value: 0x9e01, lo: 0x80, hi: 0x83}, + {value: 0x9ea9, lo: 0x84, hi: 0x85}, + {value: 0x9ee1, lo: 0x86, hi: 0x87}, + {value: 0x9f19, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa0d9, lo: 0x92, hi: 0x97}, + {value: 0xa1f1, lo: 0x98, hi: 0x9c}, + {value: 0xa2d1, lo: 0x9d, hi: 0xb3}, + {value: 0x9d91, lo: 0xb4, hi: 0xb4}, + {value: 0x9e01, lo: 0xb5, hi: 0xb5}, + {value: 0xa7d9, lo: 0xb6, hi: 0xbb}, + {value: 0xa8b9, lo: 0xbc, hi: 0xbc}, + {value: 0xa849, lo: 0xbd, hi: 0xbd}, + {value: 0xa929, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x43a + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x444 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x449 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x89, offset 0x44c + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x452 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x459 + {value: 0x0000, 
lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x45e + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x462 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x468 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x46d + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x476 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x47b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x481 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8b0d, lo: 0x98, hi: 0x9f}, + {value: 0x8b25, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x488 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8b25, lo: 0xb0, hi: 0xb7}, + {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x48f + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x496 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x49a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x49f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x4a2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x4a7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 
0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4b3 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4b9 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4be + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4c5 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4cd + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4d2 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4d6 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4e6 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4ed + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4f1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4f5 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4fc + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4fe + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x501 + {value: 
0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x504 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x508 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xaa, offset 0x511 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xab, offset 0x515 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xac}, + {value: 0x0818, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xac, offset 0x51d + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xad, offset 0x525 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xae, offset 0x52d + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb3}, + {value: 0x0c08, lo: 0xb4, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb7}, + {value: 0x0a08, lo: 0xb8, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xba}, + {value: 0x0a08, lo: 0xbb, hi: 0xbc}, + {value: 0x0c08, lo: 0xbd, hi: 0xbd}, + {value: 0x0a08, lo: 0xbe, hi: 0xbf}, + // Block 0xaf, offset 0x539 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x81}, + {value: 0x0c08, lo: 0x82, hi: 0x83}, + {value: 0x0a08, lo: 0x84, hi: 0x84}, + {value: 0x0818, lo: 0x85, hi: 0x88}, + {value: 0x0c18, lo: 0x89, hi: 0x89}, + {value: 0x0a18, lo: 0x8a, hi: 0x8a}, + {value: 0x0918, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x545 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb1, offset 0x54b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xb2, offset 0x554 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, 
+ {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb3, offset 0x560 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb4, offset 0x567 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb5, offset 0x570 + {value: 0x0000, lo: 0x0a}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb6, offset 0x57b + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x582 + {value: 0x0000, lo: 0x0e}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x3008, lo: 0x8e, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb8, offset 0x591 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x59e + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xba, offset 0x5ab + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + 
{value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xbb, offset 0x5b4 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xbc, offset 0x5b8 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xbd, offset 0x5c7 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbe, offset 0x5cf + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbf, offset 0x5da + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc0, offset 0x5e3 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xc1, offset 0x5e9 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5f1 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc3, offset 0x5fa + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xb8}, + {value: 0x0040, lo: 
0xb9, hi: 0xbf}, + // Block 0xc4, offset 0x605 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc5, offset 0x608 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc6, offset 0x614 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc7, offset 0x61d + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc8, offset 0x620 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x625 + {value: 0x0000, lo: 0x08}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xca, offset 0x62e + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcb, offset 0x633 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x9a, hi: 0x9b}, + {value: 0x3008, lo: 0x9c, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xbf}, + // Block 0xcc, offset 0x640 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xcd, offset 0x64b + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xce, offset 0x654 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + 
{value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xcf, offset 0x65e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd0, offset 0x661 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x66b + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xd2, offset 0x674 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x680 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xd4, offset 0x68d + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xd5, offset 0x69a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd6, offset 0x6a8 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd7, offset 0x6af + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 
0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0xd8, offset 0x6b3 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xd9, offset 0x6b7 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xda, offset 0x6ba + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xdb, offset 0x6bf + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xdc, offset 0x6c2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0340, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xdd, offset 0x6c7 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xde, offset 0x6ca + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xdf, offset 0x6d1 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x6d8 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xe1, offset 0x6dc + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xe2, offset 0x6e7 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xe3, offset 0x6ea + {value: 0x0000, lo: 0x02}, + {value: 0xe105, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xe4, offset 0x6ed + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xe5, offset 0x6f0 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbf}, + // Block 0xe6, offset 0x6f6 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe7, offset 0x6fc + {value: 0x0000, lo: 0x08}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0018, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xaf}, + {value: 0x3008, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe8, offset 0x705 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xe9, offset 0x708 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0xea, offset 0x70b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xeb, offset 0x70e + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xec, offset 0x711 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xa3}, + {value: 0x0008, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xed, offset 0x718 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xee, offset 0x71b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xef, offset 0x720 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xf0, offset 0x72a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf1, offset 0x72d + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xf2, offset 0x731 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb609, lo: 0x9e, hi: 0x9e}, + {value: 0xb651, lo: 0x9f, hi: 0x9f}, + {value: 0xb699, lo: 0xa0, hi: 0xa0}, + {value: 0xb701, lo: 0xa1, hi: 0xa1}, + {value: 0xb769, lo: 0xa2, hi: 0xa2}, + {value: 0xb7d1, lo: 0xa3, hi: 0xa3}, + {value: 0xb839, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xf3, offset 0x740 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb8a1, lo: 0xbb, hi: 0xbb}, + {value: 0xb8e9, lo: 0xbc, hi: 0xbc}, + {value: 0xb931, lo: 0xbd, hi: 0xbd}, + {value: 0xb999, lo: 0xbe, hi: 0xbe}, + {value: 0xba01, lo: 0xbf, hi: 0xbf}, + // Block 0xf4, offset 0x74c + {value: 0x0000, lo: 0x03}, + {value: 0xba69, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xf5, offset 0x750 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xf6, offset 0x755 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 
0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf7, offset 0x759 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf8, offset 0x75e + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf9, offset 0x762 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xfa, offset 0x767 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xfb, offset 0x770 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xfc, offset 0x77b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xfd, offset 0x781 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xfe, offset 0x787 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xff, offset 0x78d + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x100, offset 0x793 + {value: 0x0000, lo: 0x08}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0b08, lo: 0x8b, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x101, offset 0x79c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0x102, offset 0x79f + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x103, offset 0x7a2 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0818, lo: 0x81, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x104, offset 0x7a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x105, offset 
0x7aa + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x106, offset 0x7ae + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x107, offset 0x7b4 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x108, offset 0x7ba + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc229, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x109, offset 0x7bf + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0x10a, offset 0x7c2 + {value: 0x0000, lo: 0x0f}, + {value: 0xc851, lo: 0x80, hi: 0x80}, + {value: 0xc8a1, lo: 0x81, hi: 0x81}, + {value: 0xc8f1, lo: 0x82, hi: 0x82}, + {value: 0xc941, lo: 0x83, hi: 0x83}, + {value: 0xc991, lo: 0x84, hi: 0x84}, + {value: 0xc9e1, lo: 0x85, hi: 0x85}, + {value: 0xca31, lo: 0x86, hi: 0x86}, + {value: 0xca81, lo: 0x87, hi: 0x87}, + {value: 0xcad1, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcb21, lo: 0x90, hi: 0x90}, + {value: 0xcb41, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0x10b, offset 0x7d2 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x10c, offset 0x7d9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x10d, offset 0x7dc + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x10e, offset 0x7e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x10f, offset 0x7e5 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x110, offset 0x7eb + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0x111, offset 0x7f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0x112, offset 0x7f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x113, offset 0x7fa + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, 
hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x114, offset 0x803 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x115, offset 0x80a + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x116, offset 0x80f + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0xbf}, + // Block 0x117, offset 0x813 + {value: 0x0000, lo: 0x0d}, + {value: 0x0018, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0xaf}, + {value: 0x1f41, lo: 0xb0, hi: 0xb0}, + {value: 0x00c9, lo: 0xb1, hi: 0xb1}, + {value: 0x0069, lo: 0xb2, hi: 0xb2}, + {value: 0x0079, lo: 0xb3, hi: 0xb3}, + {value: 0x1f51, lo: 0xb4, hi: 0xb4}, + {value: 0x1f61, lo: 0xb5, hi: 0xb5}, + {value: 0x1f71, lo: 0xb6, hi: 0xb6}, + {value: 0x1f81, lo: 0xb7, hi: 0xb7}, + {value: 0x1f91, lo: 0xb8, hi: 0xb8}, + {value: 0x1fa1, lo: 0xb9, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x118, offset 0x821 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x119, offset 0x824 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x11a, offset 0x827 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x11b, offset 0x82b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x11c, offset 0x82f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x11d, offset 0x832 + {value: 0x0020, lo: 0x0f}, + {value: 0xdf21, lo: 0x80, hi: 0x89}, + {value: 0x8e35, lo: 0x8a, hi: 0x8a}, + {value: 0xe061, lo: 0x8b, hi: 0x9c}, + {value: 0x8e55, lo: 0x9d, hi: 0x9d}, + {value: 0xe2a1, lo: 0x9e, hi: 0xa2}, + {value: 0x8e75, lo: 0xa3, hi: 0xa3}, + {value: 0xe341, lo: 0xa4, hi: 0xab}, + {value: 0x7f0d, lo: 0xac, hi: 0xac}, + {value: 0xe441, lo: 0xad, hi: 0xaf}, + {value: 0x8e95, lo: 0xb0, hi: 0xb0}, + {value: 0xe4a1, lo: 0xb1, hi: 0xb6}, + {value: 0x8eb5, lo: 0xb7, hi: 0xb9}, + {value: 0xe561, lo: 0xba, hi: 0xba}, + {value: 0x8f15, lo: 0xbb, hi: 0xbb}, + {value: 0xe581, lo: 0xbc, hi: 0xbf}, + // Block 0x11e, offset 0x842 + {value: 0x0020, lo: 0x10}, + {value: 0x93b5, lo: 0x80, hi: 0x80}, + {value: 0xf101, lo: 0x81, hi: 0x86}, + {value: 0x93d5, lo: 0x87, hi: 0x8a}, + {value: 0xda61, lo: 0x8b, hi: 0x8b}, + {value: 0xf1c1, lo: 0x8c, hi: 0x96}, + {value: 0x9455, lo: 0x97, hi: 0x97}, + {value: 0xf321, lo: 0x98, hi: 0xa3}, + {value: 0x9475, lo: 0xa4, hi: 0xa6}, + {value: 0xf4a1, lo: 0xa7, hi: 0xaa}, + {value: 0x94d5, lo: 0xab, hi: 0xab}, + {value: 0xf521, lo: 0xac, hi: 0xac}, + {value: 0x94f5, lo: 0xad, hi: 0xad}, + {value: 0xf541, lo: 0xae, hi: 0xaf}, + {value: 0x9515, lo: 0xb0, hi: 0xb1}, + {value: 0xf581, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 
	0xbf},
+	// Block 0x11f, offset 0x853
+	{value: 0x0000, lo: 0x02},
+	{value: 0x0008, lo: 0x80, hi: 0x8a},
+	{value: 0x0040, lo: 0x8b, hi: 0xbf},
+	// Block 0x120, offset 0x856
+	{value: 0x0000, lo: 0x04},
+	{value: 0x0040, lo: 0x80, hi: 0x80},
+	{value: 0x0340, lo: 0x81, hi: 0x81},
+	{value: 0x0040, lo: 0x82, hi: 0x9f},
+	{value: 0x0340, lo: 0xa0, hi: 0xbf},
+	// Block 0x121, offset 0x85b
+	{value: 0x0000, lo: 0x01},
+	{value: 0x0340, lo: 0x80, hi: 0xbf},
+	// Block 0x122, offset 0x85d
+	{value: 0x0000, lo: 0x01},
+	{value: 0x33c0, lo: 0x80, hi: 0xbf},
+	// Block 0x123, offset 0x85f
+	{value: 0x0000, lo: 0x02},
+	{value: 0x33c0, lo: 0x80, hi: 0xaf},
+	{value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 43370 bytes (42KiB); checksum: EBD909C0
diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go
index 0a73e277e0950..0cde35a4401fa 100644
--- a/vendor/golang.org/x/net/internal/socket/cmsghdr.go
+++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go
index 8328b7d1942f9..83c35ecdcc4e0 100644
--- a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go
+++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go
new file mode 100644
index 0000000000000..98be146bc51dc
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go
@@ -0,0 +1,25 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package socket
+
+import "syscall"
+
+func (h *cmsghdr) set(l, lvl, typ int) {
+	h.Len = int32(l)
+	h.Level = int32(lvl)
+	h.Type = int32(typ)
+}
+
+func controlHeaderLen() int {
+	return syscall.CmsgLen(0)
+}
+
+func controlMessageLen(dataLen int) int {
+	return syscall.CmsgLen(dataLen)
+}
+
+func controlMessageSpace(dataLen int) int {
+	return syscall.CmsgSpace(dataLen)
+}
diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go
index f14872d3d3575..47f0d6e2de533 100644
--- a/vendor/golang.org/x/net/internal/socket/error_unix.go
+++ b/vendor/golang.org/x/net/internal/socket/error_unix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go
index dfeda752be28a..0309e10fecd74 100644
--- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go
+++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // +build arm64 amd64 ppc64 ppc64le mips64 mips64le riscv64 s390x
-// +build aix darwin dragonfly freebsd linux netbsd openbsd
+// +build aix darwin dragonfly freebsd linux netbsd openbsd zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go
index a746e90e3073f..f44d4f523321e 100644
--- a/vendor/golang.org/x/net/internal/socket/iovec_stub.go
+++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go
index 873490a7ae9d4..1a253d2d6091d 100644
--- a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go
+++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go
new file mode 100644
index 0000000000000..eb1a99a3342da
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go
@@ -0,0 +1,36 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x
+// +build zos
+
+package socket
+
+import "unsafe"
+
+func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {
+	for i := range vs {
+		vs[i].set(bs[i])
+	}
+	if len(vs) > 0 {
+		h.Iov = &vs[0]
+		h.Iovlen = int32(len(vs))
+	}
+	if len(oob) > 0 {
+		h.Control = (*byte)(unsafe.Pointer(&oob[0]))
+		h.Controllen = uint32(len(oob))
+	}
+	if sa != nil {
+		h.Name = (*byte)(unsafe.Pointer(&sa[0]))
+		h.Namelen = uint32(len(sa))
+	}
+}
+
+func (h *msghdr) controllen() int {
+	return int(h.Controllen)
+}
+
+func (h *msghdr) flags() int {
+	return int(h.Flags)
+}
diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go
index d5ae3f8e143d2..610b1a1a5e323 100644
--- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go
+++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go
@@ -2,12 +2,13 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
 
 package socket
 
 import (
 	"os"
+	"runtime"
 	"syscall"
 )
 
@@ -24,7 +25,7 @@ func (c *Conn) recvMsg(m *Message, flags int) error {
 	var n int
 	fn := func(s uintptr) bool {
 		n, operr = recvmsg(s, &h, flags)
-		if operr == syscall.EAGAIN {
+		if operr == syscall.EAGAIN || (runtime.GOOS == "zos" && operr == syscall.EWOULDBLOCK) {
 			return false
 		}
 		return true
@@ -61,7 +62,7 @@ func (c *Conn) sendMsg(m *Message, flags int) error {
 	var n int
 	fn := func(s uintptr) bool {
 		n, operr = sendmsg(s, &h, flags)
-		if operr == syscall.EAGAIN {
+		if operr == syscall.EAGAIN || (runtime.GOOS == "zos" && operr == syscall.EWOULDBLOCK) {
 			return false
 		}
 		return true
diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go
index b8cea6fe5343c..e51b60d15107e 100644
--- a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go
+++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_zos.go b/vendor/golang.org/x/net/internal/socket/sys_const_zos.go
new file mode 100644
index 0000000000000..01b6372039aff
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socket/sys_const_zos.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build zos
+
+package socket
+
+import "syscall"
+
+const (
+	sysAF_UNSPEC = syscall.AF_UNSPEC
+	sysAF_INET   = syscall.AF_INET
+	sysAF_INET6  = syscall.AF_INET6
+
+	sysSOCK_RAW = syscall.SOCK_RAW
+)
diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go
index 22eae809c9e12..05ded237ec93b 100644
--- a/vendor/golang.org/x/net/internal/socket/sys_posix.go
+++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go
index 8e1e07427b947..3c97008da4035 100644
--- a/vendor/golang.org/x/net/internal/socket/sys_stub.go
+++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
 
 package socket
 
diff --git a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go
new file mode 100644
index 0000000000000..1e38b9223281f
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) +func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func probeProtocolStack() int { + return 4 // sizeof(int) on GOOS=zos GOARCH=s390x +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall_syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall_syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.s new file mode 100644 index 0000000000000..60d5839c25b65 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·syscall_syscall(SB),NOSPLIT,$0 + JMP syscall·_syscall(SB) + +TEXT ·syscall_syscall6(SB),NOSPLIT,$0 + JMP syscall·_syscall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_zos_s390x.go new file mode 100644 index 0000000000000..514ca3754d4c6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_zos_s390x.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Iov *iovec + Control *byte + Flags int32 + Namelen uint32 + Iovlen int32 + Controllen uint32 +} + +type cmsghdr struct { + Len int32 + Level int32 + Type int32 +} + +const ( + sizeofCmsghdr = 12 + sizeofSockaddrInet = 16 + sizeofSockaddrInet6 = 28 +) diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go index a0c049d683ade..9d8253479cec4 100644 --- a/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_zos.go b/vendor/golang.org/x/net/ipv4/control_zos.go new file mode 100644 index 0000000000000..04420003e0261 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_zos.go @@ -0,0 +1,86 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index e7614661d7b75..7bde68947fa74 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index 1116256f24576..251bd0c70867f 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go index dea64519d8e26..ef29718d15705 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go index 37d4806b34297..fbca52684a5ea 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go index b9c85b334fcc4..555585188f4aa 100644 --- a/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_zos.go b/vendor/golang.org/x/net/ipv4/sys_zos.go new file mode 100644 index 0000000000000..7426606cff3c2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_zos.go @@ -0,0 +1,55 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet4)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + sa.Len = sizeofSockaddrInet4 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet4)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + sa.Len = sizeofSockaddrInet4 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet4)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + sa.Len = sizeofSockaddrInet4 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_zos_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_zos_s390x.go new file mode 100644 index 0000000000000..4bbfda07dc3a2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_zos_s390x.go @@ -0,0 +1,80 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hand edited based on zerrors_zos_s390x.go +// TODO(Bill O'Farrell): auto-generate. 
+ +package ipv4 + +const ( + sysIP_ADD_MEMBERSHIP = 5 + sysIP_ADD_SOURCE_MEMBERSHIP = 12 + sysIP_BLOCK_SOURCE = 10 + sysIP_DEFAULT_MULTICAST_LOOP = 1 + sysIP_DEFAULT_MULTICAST_TTL = 1 + sysIP_DROP_MEMBERSHIP = 6 + sysIP_DROP_SOURCE_MEMBERSHIP = 13 + sysIP_MAX_MEMBERSHIPS = 20 + sysIP_MULTICAST_IF = 7 + sysIP_MULTICAST_LOOP = 4 + sysIP_MULTICAST_TTL = 3 + sysIP_OPTIONS = 1 + sysIP_PKTINFO = 101 + sysIP_RECVPKTINFO = 102 + sysIP_TOS = 2 + sysIP_UNBLOCK_SOURCE = 11 + + sysMCAST_JOIN_GROUP = 40 + sysMCAST_LEAVE_GROUP = 41 + sysMCAST_JOIN_SOURCE_GROUP = 42 + sysMCAST_LEAVE_SOURCE_GROUP = 43 + sysMCAST_BLOCK_SOURCE = 44 + sysMCAST_UNBLOCK_SOURCE = 45 + + sizeofIPMreq = 8 + sizeofSockaddrInet4 = 16 + sizeofSockaddrStorage = 128 + sizeofGroupReq = 136 + sizeofGroupSourceReq = 264 + sizeofInetPktinfo = 8 +) + +type sockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte + Zero [8]uint8 +} + +type inetPktinfo struct { + Addr [4]byte + Ifindex uint32 +} + +type sockaddrStorage struct { + Len uint8 + Family byte + ss_pad1 [6]byte + ss_align int64 + ss_pad2 [112]byte +} + +type groupReq struct { + Interface uint32 + reserved uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + reserved uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index 8c221b598957f..77c449eceb3ef 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go index 1d773cbcc8e69..e50b8864a24cd 100644 --- a/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go index 0971a008bf4f9..ec31ca2adc715 100644 --- a/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go index 370e51acd1fb3..297e191f5758b 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_zos.go b/vendor/golang.org/x/net/ipv6/icmp_zos.go new file mode 100644 index 0000000000000..ddf8f093fc412 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_zos.go @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) + +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) + +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index 284a04278ed02..8baa26e7731bd 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index c5a4c967527d7..00c4f58754763 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go index 824c623ccefd5..f718792782e4b 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go index 0a87a93bbd4db..d87db6a0dcd99 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go index 9b52e978cbd9d..88d64f17ebdf0 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin freebsd linux solaris +// +build aix darwin freebsd linux solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go index d5bc1108c5f2e..b0708868fe9dc 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !aix,!darwin,!freebsd,!linux,!solaris +// +build !aix,!darwin,!freebsd,!linux,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go index 4f252d09f6cb7..cbe4a029c36dd 100644 --- a/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_zos.go b/vendor/golang.org/x/net/ipv6/sys_zos.go new file mode 100644 index 0000000000000..d4567f908fa89 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_zos.go @@ -0,0 +1,70 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + sa.Len = sizeofSockaddrInet6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + sa.Len = sizeofSockaddrInet6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + sa.Len = sizeofSockaddrInet6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_zos_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_zos_s390x.go new file mode 100644 index 0000000000000..3f980691f6be1 --- /dev/null +++ 
b/vendor/golang.org/x/net/ipv6/zsys_zos_s390x.go @@ -0,0 +1,106 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hand edited based on zerrors_zos_s390x.go +// TODO(Bill O'Farrell): auto-generate. + +package ipv6 + +const ( + sysIPV6_ADDR_PREFERENCES = 32 + sysIPV6_CHECKSUM = 19 + sysIPV6_DONTFRAG = 29 + sysIPV6_DSTOPTS = 23 + sysIPV6_HOPLIMIT = 11 + sysIPV6_HOPOPTS = 22 + sysIPV6_JOIN_GROUP = 5 + sysIPV6_LEAVE_GROUP = 6 + sysIPV6_MULTICAST_HOPS = 9 + sysIPV6_MULTICAST_IF = 7 + sysIPV6_MULTICAST_LOOP = 4 + sysIPV6_NEXTHOP = 20 + sysIPV6_PATHMTU = 12 + sysIPV6_PKTINFO = 13 + sysIPV6_PREFER_SRC_CGA = 0x10 + sysIPV6_PREFER_SRC_COA = 0x02 + sysIPV6_PREFER_SRC_HOME = 0x01 + sysIPV6_PREFER_SRC_NONCGA = 0x20 + sysIPV6_PREFER_SRC_PUBLIC = 0x08 + sysIPV6_PREFER_SRC_TMP = 0x04 + sysIPV6_RECVDSTOPTS = 28 + sysIPV6_RECVHOPLIMIT = 14 + sysIPV6_RECVHOPOPTS = 26 + sysIPV6_RECVPATHMTU = 16 + sysIPV6_RECVPKTINFO = 15 + sysIPV6_RECVRTHDR = 25 + sysIPV6_RECVTCLASS = 31 + sysIPV6_RTHDR = 21 + sysIPV6_RTHDRDSTOPTS = 24 + sysIPV6_RTHDR_TYPE_0 = 0 + sysIPV6_TCLASS = 30 + sysIPV6_UNICAST_HOPS = 3 + sysIPV6_USE_MIN_MTU = 18 + sysIPV6_V6ONLY = 10 + + sysMCAST_JOIN_GROUP = 40 + sysMCAST_LEAVE_GROUP = 41 + sysMCAST_JOIN_SOURCE_GROUP = 42 + sysMCAST_LEAVE_SOURCE_GROUP = 43 + sysMCAST_BLOCK_SOURCE = 44 + sysMCAST_UNBLOCK_SOURCE = 45 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 128 + sizeofICMPv6Filter = 32 + sizeofInet6Pktinfo = 20 + sizeofIPv6Mtuinfo = 32 + sizeofSockaddrInet6 = 28 + sizeofGroupReq = 136 + sizeofGroupSourceReq = 264 +) + +type sockaddrStorage struct { + Len uint8 + Family byte + ss_pad1 [6]byte + ss_align int64 + ss_pad2 [112]byte +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type groupReq struct { + Interface uint32 + reserved uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + reserved uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s index 06f84b8555800..6b4027b33fd24 100644 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 951078f2e82e2..87dd5e30215b1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -39,31 +39,34 @@ func initOptions() { func archInit() { switch runtime.GOOS { - case "android", "darwin", "ios", "netbsd": - // Android and iOS don't seem to allow reading these registers. - // - // NetBSD: - // ID_AA64ISAR0_EL1 is a privileged register and cannot be read from EL0. - // It can be read via sysctl(3). Example for future implementers: - // https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c - // - // Fake the minimal features expected by - // TestARM64minimalFeatures. 
- ARM64.HasASIMD = true - ARM64.HasFP = true - case "linux": + case "freebsd": + readARM64Registers() + case "linux", "netbsd": doinit() default: - readARM64Registers() + // Most platforms don't seem to allow reading these registers. + // + // OpenBSD: + // See https://golang.org/issue/31746 + setMinimalFeatures() } } +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + func readARM64Registers() { Initialized = true - // ID_AA64ISAR0_EL1 - isar0 := getisar0() + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 switch extractBits(isar0, 4, 7) { case 1: ARM64.HasAES = true @@ -121,8 +124,6 @@ func readARM64Registers() { } // ID_AA64ISAR1_EL1 - isar1 := getisar1() - switch extractBits(isar1, 0, 3) { case 1: ARM64.HasDCPOP = true @@ -144,8 +145,6 @@ func readARM64Registers() { } // ID_AA64PFR0_EL1 - pfr0 := getpfr0() - switch extractBits(pfr0, 16, 19) { case 0: ARM64.HasFP = true diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index a54436e39095a..cfc08c979439d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index 7b88e865a4280..7f7f272a014f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go index 568bcd031aa41..75a95566161d0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index f7cb46971cb05..4adb89cf9cc84 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build 386 amd64 amd64p32 -// +build !gccgo +// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go index b88d6b8f662bd..1517ac61d31b5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -17,86 +17,7 @@ const ( hwcap_VXE = 8192 ) -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// function is the code for the named cryptographic function. 
-type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. -func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -// facility is a bit index for the named facility. -type facility uint8 - -const ( - // cryptography facilities - msa4 facility = 77 // message-security-assist extension 4 - msa8 facility = 146 // message-security-assist extension 8 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { +func initS390Xbase() { // test HWCAP bit vector has := func(featureMask uint) bool { return hwCap&featureMask == featureMask @@ -116,44 +37,4 @@ func doinit() { if S390X.HasVX { S390X.HasVXE = has(hwcap_VXE) } - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) - } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 0000000000000..ebfb3fc8e76d2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+ for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index 3ffc4afa03ce2..16c1c4090ee2f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !linux,arm64 +// +build !linux,!netbsd +// +build arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 0000000000000..f49fad67783e6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux +// +build mips64 mips64le + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go index 544cd621ceea4..5881b8833f5a5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -8,10 +8,10 @@ const cacheLineSize = 256 func initOptions() { options = []option{ - {Name: "zarch", Feature: &S390X.HasZARCH}, - {Name: "stfle", Feature: &S390X.HasSTFLE}, - {Name: "ldisp", Feature: &S390X.HasLDISP}, - {Name: "eimm", Feature: &S390X.HasEIMM}, + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, {Name: "dfp", Feature: &S390X.HasDFP}, {Name: "etf3eh", Feature: &S390X.HasETF3EH}, {Name: "msa", Feature: &S390X.HasMSA}, @@ -28,3 +28,145 @@ func initOptions() { {Name: "vxe", Feature: &S390X.HasVXE}, } } + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. 
+type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s index e5037d92e0612..964946df9571b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s index 47f084128cc3f..2f557a5887a4f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.s @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build 386 amd64 amd64p32 -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_zos.go b/vendor/golang.org/x/sys/cpu/cpu_zos.go new file mode 100644 index 0000000000000..5f54683a22e3e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_zos.go @@ -0,0 +1,10 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func archInit() { + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go new file mode 100644 index 0000000000000..ccb1b708aba98 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go @@ -0,0 +1,25 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +func initS390Xbase() { + // get the facilities list + facilities := stfle() + + // mandatory + S390X.HasZARCH = facilities.Has(zarch) + S390X.HasSTFLE = facilities.Has(stflef) + S390X.HasLDISP = facilities.Has(ldisp) + S390X.HasEIMM = facilities.Has(eimm) + + // optional + S390X.HasETF3EH = facilities.Has(etf3eh) + S390X.HasDFP = facilities.Has(dfp) + S390X.HasMSA = facilities.Has(msa) + S390X.HasVX = facilities.Has(vx) + if S390X.HasVX { + S390X.HasVXE = facilities.Has(vxe) + } +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go index 78fe25e86fbbe..5b427d67e2f7b 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -7,7 +7,7 @@ // (See golang.org/issue/32102) // +build aix,ppc64 -// +build !gccgo +// +build gc package cpu diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index 06f84b8555800..6b4027b33fd24 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s index 8a7278319e319..8a06b87d715a0 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s index 6321421f27263..f2397fde554de 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s index 333242d506197..c9e6b6fc8b55c 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc // +build arm,darwin #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s index 97e0174371868..89843f8f4b29c 100644 --- a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc // +build arm64,darwin #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s index 603dd5728c4a4..27674e1cafd79 100644 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s index c9a0a2601562f..49f0ac2364cbc 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s index 35172477c8697..f2dfc57b836f5 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s index 9227c875bfebb..6d740db2c0c70 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s index d9318cbf034d8..a8f5a29b35f26 100644 --- a/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 448bebbb59af4..0655ecbfbbecb 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index c6468a9588022..bc3fb6ac3ed2e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index cf0f3575c1339..55b13c7ba45c4 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index afe6fdf6b1114..22a83d8e3fad6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -4,7 +4,7 @@ // +build linux // +build arm64 -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index ab9d63831a75a..dc222b90ce74f 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -4,7 +4,7 @@ // +build linux // +build mips64 mips64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 99e5399045c29..d333f13cff3b7 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -4,7 +4,7 @@ // +build linux // +build mips mipsle -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 88f712557810a..459a629c2732f 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -4,7 +4,7 @@ // +build linux // +build ppc64 ppc64le -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 3cfefed2ec042..04d38497c6dda 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build riscv64,!gccgo +// +build riscv64,gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index a5a863c6bd75c..cc303989e1741 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -4,7 +4,7 @@ // +build s390x // +build linux -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s index 48bdcd7632aff..ae7b498d50684 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s index 2ede05c72f09d..e57367c17aa73 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s index e8928571c450e..d7da175e1a3fb 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s index 6f98ba5a370a2..e7cbe1904c4e6 100644 --- a/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s index 00576f3c83559..2f00b0310f434 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s index 790ef77f86ef7..07632c99ceaa8 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s index 469bfa10039a0..73e997320fc7a 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s index 0cedea3d39d8e..c47302aa46df9 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 567a4763c88a7..47c93fcb6c765 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index ded8260f3e409..1f2c755a72035 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !gccgo +// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index 5e9269063f5b1..86781eac2210d 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build ppc64 s390x mips mips64 +// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index bcdb5d30eb9b6..8822d8541f23c 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 +// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index fc0e50e037214..8db48e5e06266 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -1,9 +1,9 @@ -// +build linux,386 linux,arm linux,mips linux,mipsle - // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build linux,386 linux,arm linux,mips linux,mipsle + package unix func init() { diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 0d72dbb857018..c0f9f2d523f53 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -59,12 +59,14 @@ includes_Darwin=' #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -223,6 +225,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -231,6 +234,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -373,6 +377,7 @@ includes_SunOS=' #include #include #include +#include #include #include #include @@ -497,6 +502,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || $2 ~ /^TCGET/ || @@ -517,6 +523,7 @@ ccflags="$@" $2 ~ /^CAP_/ || $2 ~ /^CP_/ || $2 ~ /^CPUSTATES$/ || + $2 ~ /^CTLIOCGINFO$/ || $2 ~ /^ALG_/ || $2 ~ /^FI(CLONE|DEDUPERANGE)/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || @@ -555,6 +562,7 @@ ccflags="$@" $2 ~ /^CRYPTO_/ || $2 ~ /^TIPC_/ || $2 ~ /^DEVLINK_/ || + $2 ~ /^LWTUNNEL_IP/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index fd4ee8ebeb707..ab75ef9cc621e 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -24,7 +24,13 @@ // holds a value of type syscall.Errno. 
package unix // import "golang.org/x/sys/unix" -import "strings" +import ( + "bytes" + "strings" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any @@ -49,5 +55,40 @@ func BytePtrFromString(s string) (*byte, error) { return &a[0], nil } +// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any +// bytes after the NUL removed. +func ByteSliceToString(s []byte) string { + if i := bytes.IndexByte(s, 0); i != -1 { + s = s[:i] + } + return string(s) +} + +// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. +// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated +// at a zero byte; if the zero byte is not present, the program may crash. +func BytePtrToString(p *byte) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + 1) + } + + var s []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + h.Data = unsafe.Pointer(p) + h.Len = n + h.Cap = n + + return string(s) +} + // Single-word zero for use when we need a valid pointer to 0 bytes. var _zero uintptr diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 9ad8a0d4a5668..4408153822dbb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -19,6 +19,22 @@ import "unsafe" * Wrapped */ +func Access(path string, mode uint32) (err error) { + return Faccessat(AT_FDCWD, path, mode, 0) +} + +func Chmod(path string, mode uint32) (err error) { + return Fchmodat(AT_FDCWD, path, mode, 0) +} + +func Chown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, 0) +} + +func Creat(path string, mode uint32) (fd int, err error) { + return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode) +} + //sys utimes(path string, times *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 123536a028ca3..bc634a280a08c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -277,7 +277,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil } - return nil, EAFNOSUPPORT + return anyToSockaddrGOOS(fd, rsa) } func Accept(fd int) (nfd int, sa Sockaddr, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 21b8092cdd16b..b625738900878 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,6 +13,7 @@ package unix import ( + "runtime" "syscall" "unsafe" ) @@ -30,10 +31,40 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +// SockaddrCtl implements the Sockaddr interface for AF_SYSTEM type sockets. 
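The syscall.go hunk above introduces ByteSliceToString and BytePtrToString alongside the existing ByteSliceFromString/BytePtrFromString helpers, which simplifies turning NUL-terminated buffers filled in by the kernel back into Go strings. A minimal usage sketch, assuming any Unix platform; the string literal is arbitrary.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// ByteSliceFromString returns a NUL-terminated copy of the string.
	buf, err := unix.ByteSliceFromString("/tmp/example")
	if err != nil {
		panic(err)
	}

	// ByteSliceToString drops the NUL terminator and anything after it,
	// which is what fixed-size kernel buffers need.
	fmt.Println(unix.ByteSliceToString(buf))

	// BytePtrToString does the same starting from a *byte that points at a
	// NUL-terminated sequence.
	fmt.Println(unix.BytePtrToString(&buf[0]))
}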
+type SockaddrCtl struct { + ID uint32 + Unit uint32 + raw RawSockaddrCtl +} + +func (sa *SockaddrCtl) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sc_len = SizeofSockaddrCtl + sa.raw.Sc_family = AF_SYSTEM + sa.raw.Ss_sysaddr = AF_SYS_CONTROL + sa.raw.Sc_id = sa.ID + sa.raw.Sc_unit = sa.Unit + return unsafe.Pointer(&sa.raw), SizeofSockaddrCtl, nil +} + +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_SYSTEM: + pp := (*RawSockaddrCtl)(unsafe.Pointer(rsa)) + if pp.Ss_sysaddr == AF_SYS_CONTROL { + sa := new(SockaddrCtl) + sa.ID = pp.Sc_id + sa.Unit = pp.Sc_unit + return sa, nil + } + } + return nil, EAFNOSUPPORT +} + // Some external packages rely on SYS___SYSCTL being defined to implement their // own sysctl wrappers. Provide it here, even though direct syscalls are no // longer supported on darwin. -const SYS___SYSCTL = 202 +const SYS___SYSCTL = SYS_SYSCTL // Translate "kern.hostname" to []_C_int{0,1,2,3}. func nametomib(name string) (mib []_C_int, err error) { @@ -257,6 +288,35 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) +func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { + err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) + runtime.KeepAlive(ctlInfo) + return err +} + +// IfreqMTU is struct ifreq used to get or set a network device's MTU. +type IfreqMTU struct { + Name [IFNAMSIZ]byte + MTU int32 +} + +// IoctlGetIfreqMTU performs the SIOCGIFMTU ioctl operation on fd to get the MTU +// of the network device specified by ifname. +func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { + var ifreq IfreqMTU + copy(ifreq.Name[:], ifname) + err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + return &ifreq, err +} + +// IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU +// of the network device specified by ifreq.Name. 
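The darwin hunks here add SockaddrCtl for AF_SYSTEM control sockets, an IoctlCtlInfo wrapper for the new CTLIOCGINFO request, and the IfreqMTU helpers (IoctlSetIfreqMTU continues just below). A rough, macOS-only sketch of how these pieces could fit together; it assumes the generated CtlInfo type (fields Id and Name) and the SYSPROTO_CONTROL constant from the zerrors/ztypes files, the control name "com.apple.net.utun_control" and interface "en0" are only examples, and connecting to the utun control usually needs elevated privileges.

// +build darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// ctlConnect resolves a kernel-control name to its ID via CTLIOCGINFO and
// connects to it using the new SockaddrCtl type.
func ctlConnect(name string) (int, error) {
	fd, err := unix.Socket(unix.AF_SYSTEM, unix.SOCK_DGRAM, unix.SYSPROTO_CONTROL)
	if err != nil {
		return -1, err
	}
	info := &unix.CtlInfo{}
	copy(info.Name[:], name)
	if err := unix.IoctlCtlInfo(fd, info); err != nil {
		unix.Close(fd)
		return -1, err
	}
	// Unit 0 asks the kernel to pick the next free unit (e.g. a new utunN).
	if err := unix.Connect(fd, &unix.SockaddrCtl{ID: info.Id, Unit: 0}); err != nil {
		unix.Close(fd)
		return -1, err
	}
	return fd, nil
}

// ifMTU reads an interface MTU through the new IoctlGetIfreqMTU helper.
func ifMTU(ifname string) (int32, error) {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		return 0, err
	}
	defer unix.Close(fd)
	ifr, err := unix.IoctlGetIfreqMTU(fd, ifname)
	if err != nil {
		return 0, err
	}
	return ifr.MTU, nil
}

func main() {
	if mtu, err := ifMTU("en0"); err == nil {
		fmt.Println("en0 MTU:", mtu)
	}
	if fd, err := ctlConnect("com.apple.net.utun_control"); err == nil {
		fmt.Println("connected to kernel control on fd", fd)
		unix.Close(fd)
	}
}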
+func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { + err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) + runtime.KeepAlive(ifreq) + return err +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index ea0be1e92917a..6c1f4ab95b477 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -6,11 +6,7 @@ package unix -import ( - "syscall" -) - -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} @@ -49,5 +45,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 58624044843ce..0582ae256ef43 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -6,11 +6,7 @@ package unix -import ( - "syscall" -) - -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -49,5 +45,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index b8b3141819123..c6a9733b4cb6e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -4,9 +4,7 @@ package unix -import ( - "syscall" -) +import "syscall" func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 67413983735fa..253afa4de55cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -6,13 +6,7 @@ package unix -import ( - "syscall" -) - -func ptrace(request int, pid int, addr uintptr, data uintptr) error { - return ENOTSUP -} +import "syscall" func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} @@ -51,5 +45,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys getfsstat(buf 
unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index bed7dcfec116e..a4f2944a24eab 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -47,6 +47,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + // Translate "kern.hostname" to []_C_int{0,1,2,3}. func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) @@ -101,6 +105,19 @@ func Pipe(p []int) (err error) { return } +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) error { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err := pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return err +} + //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) func Pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index f6db02aff401f..acc00c2e6a10a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -54,6 +54,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + // Translate "kern.hostname" to []_C_int{0,1,2,3}. func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 048d18e3c810e..c97c2ee53e5f5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// TODO(rsc): Rewrite all nn(SP) references into name+(nn-8)(FP) -// so that go vet can check that they are correct. - // +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 21a4946ba553e..baa771f8ad982 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build amd64,linux -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index c26e6ec2314a8..9edf3961b010c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
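The dragonfly hunk above adds a Pipe2 wrapper, mirroring the one already present on Linux and the other BSDs, so pipe flags can be applied atomically at creation time instead of racing a separate fcntl call. A small sketch, assuming a platform where unix.Pipe2 is available:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Pipe2 sets O_CLOEXEC and O_NONBLOCK on both ends at creation time.
	p := make([]int, 2)
	if err := unix.Pipe2(p, unix.O_CLOEXEC|unix.O_NONBLOCK); err != nil {
		panic(err)
	}
	defer unix.Close(p[0])
	defer unix.Close(p[1])

	if _, err := unix.Write(p[1], []byte("ping")); err != nil {
		panic(err)
	}
	buf := make([]byte, 4)
	n, err := unix.Read(p[0], buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n]))
}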
-// +build linux,!gccgo +// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 070bd38994ecf..90e33d8cf751a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,!gccgo,386 +// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index 8c514c95ed4fb..1a97baae732ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm,!gccgo,linux +// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index dbd5e03b62727..1e6843b4c3dac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -31,6 +31,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 2c1f46ea1eff5..6a50b50bd6924 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -31,6 +31,10 @@ type SockaddrDatalink struct { raw RawSockaddrDatalink } +func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { + return nil, EAFNOSUPPORT +} + func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func nametomib(name string) (mib []_C_int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 1c70d1b6902b1..87bd161cefc98 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// +build darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !gccgo,!ppc64le,!ppc64 +// +build gc,!ppc64le,!ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index 86dc765aba3e6..d36216c3ca73a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -4,7 +4,7 @@ // +build linux // +build ppc64le ppc64 -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index 6f333594bbd19..ec376f51bc423 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -251,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index db767eb257f64..fea5dfaadb9b8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -251,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index ddc5d001b6951..03feefbf8c92e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -251,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 0614d26d01e74..b40fb1f69675f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -45,6 +45,7 @@ const ( AF_SIP = 0x18 AF_SNA = 0xb AF_SYSTEM = 0x20 + AF_SYS_CONTROL = 0x2 AF_UNIX = 0x1 AF_UNSPEC = 0x0 AF_UTUN = 0x26 @@ -251,6 +252,7 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + CTLIOCGINFO = 0xc0644e03 CTL_HW = 0x6 CTL_KERN = 0x1 CTL_MAXNAME = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6232b2eb88ba7..b46110354dfbe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -324,6 +324,7 @@ const ( CAP_AUDIT_WRITE = 0x1d CAP_BLOCK_SUSPEND = 0x24 CAP_BPF = 0x27 + CAP_CHECKPOINT_RESTORE = 0x28 CAP_CHOWN = 0x0 CAP_DAC_OVERRIDE = 0x1 CAP_DAC_READ_SEARCH = 0x2 @@ -332,7 +333,7 @@ const ( CAP_IPC_LOCK = 0xe CAP_IPC_OWNER = 0xf CAP_KILL = 0x5 - CAP_LAST_CAP = 0x27 + CAP_LAST_CAP = 0x28 CAP_LEASE = 0x1c CAP_LINUX_IMMUTABLE = 0x9 CAP_MAC_ADMIN = 0x21 @@ -650,8 +651,8 @@ const ( FAN_DELETE = 0x200 FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 - FAN_DIR_MODIFY = 0x80000 FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_DFID 
= 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 @@ -679,7 +680,10 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 + FAN_REPORT_NAME = 0x800 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1213,6 +1217,12 @@ const ( LOOP_SET_STATUS_SETTABLE_FLAGS = 0xc LO_KEY_SIZE = 0x20 LO_NAME_SIZE = 0x40 + LWTUNNEL_IP6_MAX = 0x8 + LWTUNNEL_IP_MAX = 0x8 + LWTUNNEL_IP_OPTS_MAX = 0x3 + LWTUNNEL_IP_OPT_ERSPAN_MAX = 0x4 + LWTUNNEL_IP_OPT_GENEVE_MAX = 0x3 + LWTUNNEL_IP_OPT_VXLAN_MAX = 0x1 MADV_COLD = 0x14 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb @@ -1980,6 +1990,7 @@ const ( RTPROT_EIGRP = 0xc0 RTPROT_GATED = 0x8 RTPROT_ISIS = 0xbb + RTPROT_KEEPALIVED = 0x12 RTPROT_KERNEL = 0x2 RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa @@ -2170,6 +2181,7 @@ const ( SO_EE_ORIGIN_TXSTATUS = 0x4 SO_EE_ORIGIN_TXTIME = 0x6 SO_EE_ORIGIN_ZEROCOPY = 0x5 + SO_EE_RFC4884_FLAG_INVALID = 0x1 SO_GET_FILTER = 0x1a SO_NO_CHECK = 0xb SO_PEERNAME = 0x1c diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 46e054ccb0e0d..5312c36cc8286 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -192,6 +192,12 @@ const ( CSTOPB = 0x40 CSUSP = 0x1a CSWTCH = 0x1a + DIOC = 0x6400 + DIOCGETB = 0x6402 + DIOCGETC = 0x6401 + DIOCGETP = 0x6408 + DIOCSETE = 0x6403 + DIOCSETP = 0x6409 DLT_AIRONET_HEADER = 0x78 DLT_APPLE_IP_OVER_IEEE1394 = 0x8a DLT_ARCNET = 0x7 @@ -290,6 +296,7 @@ const ( FF0 = 0x0 FF1 = 0x8000 FFDLY = 0x8000 + FIORDCHK = 0x6603 FLUSHALL = 0x1 FLUSHDATA = 0x0 FLUSHO = 0x2000 @@ -645,6 +652,14 @@ const ( MAP_SHARED = 0x1 MAP_TEXT = 0x400 MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x29 + MCAST_JOIN_SOURCE_GROUP = 0x2d + MCAST_LEAVE_GROUP = 0x2a + MCAST_LEAVE_SOURCE_GROUP = 0x2e + MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MSG_CTRUNC = 0x10 @@ -653,6 +668,7 @@ const ( MSG_DUPCTRL = 0x800 MSG_EOR = 0x8 MSG_MAXIOVLEN = 0x10 + MSG_NOSIGNAL = 0x200 MSG_NOTIFICATION = 0x100 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -687,6 +703,7 @@ const ( O_APPEND = 0x8 O_CLOEXEC = 0x800000 O_CREAT = 0x100 + O_DIRECTORY = 0x1000000 O_DSYNC = 0x40 O_EXCL = 0x400 O_EXEC = 0x400000 @@ -725,7 +742,7 @@ const ( RLIMIT_FSIZE = 0x1 RLIMIT_NOFILE = 0x5 RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x3 + RLIM_INFINITY = 0xfffffffffffffffd RTAX_AUTHOR = 0x6 RTAX_BRD = 0x7 RTAX_DST = 0x0 @@ -1047,6 +1064,7 @@ const ( TCOON = 0x1 TCP_ABORT_THRESHOLD = 0x11 TCP_ANONPRIVBIND = 0x20 + TCP_CONGESTION = 0x25 TCP_CONN_ABORT_THRESHOLD = 0x13 TCP_CONN_NOTIFY_THRESHOLD = 0x12 TCP_CORK = 0x18 @@ -1076,6 +1094,8 @@ const ( TCSETSF = 0x5410 TCSETSW = 0x540f TCXONC = 0x5406 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOC = 0x5400 TIOCCBRK = 0x747a TIOCCDTR = 0x7478 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 4b3a8ad7bec18..0550da06d1472 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,7 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
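Among the zerrors_linux.go additions above are the fanotify flags FAN_REPORT_DIR_FID, FAN_REPORT_NAME and FAN_REPORT_DFID_NAME (replacing the dropped FAN_DIR_MODIFY), which let an fanotify group report directory file handles and entry names with each event. A rough, Linux-only sketch under the assumption of CAP_SYS_ADMIN and a sufficiently recent kernel; "/tmp" is just an example path.

// +build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// FAN_REPORT_DFID_NAME asks fanotify to attach the parent directory
	// handle and file name to every event record.
	fd, err := unix.FanotifyInit(
		unix.FAN_CLASS_NOTIF|unix.FAN_CLOEXEC|unix.FAN_REPORT_DFID_NAME,
		unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Watch for entries created under /tmp (FAN_ONDIR also reports
	// created subdirectories).
	err = unix.FanotifyMark(fd, unix.FAN_MARK_ADD,
		unix.FAN_CREATE|unix.FAN_ONDIR, unix.AT_FDCWD, "/tmp")
	if err != nil {
		panic(err)
	}
	fmt.Println("fanotify group ready; events can now be read from fd", fd)
}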
// +build aix,ppc64 -// +build !gccgo +// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index bd13b38567591..6eb457983232d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -2434,21 +2434,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2535,6 +2520,21 @@ func libc_lstat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index d5fb53fd17367..1c53979a101e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -272,8 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 @@ -284,6 +282,8 @@ TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index d81696f9eaa43..889c14059e9a2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2434,21 +2434,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace 
libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2535,6 +2520,21 @@ func libc_lstat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 887fd5f4ef8da..c77bd6e20bdcf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -272,8 +272,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 @@ -284,6 +282,8 @@ TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 08638436c5776..23b65a5301ad5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2520,6 +2520,21 @@ func libc_lstat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 16aebee23aa10..53c402bf68b58 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -282,6 +282,8 @@ TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 JMP libc_stat(SB) TEXT 
·libc_statfs_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index aebfe511ad52f..1aaccd3615eea 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -362,6 +362,16 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func extpread(fd int, p []byte, flags int, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go new file mode 100644 index 0000000000000..ad62324c7c14b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go @@ -0,0 +1,437 @@ +// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build 386,darwin + +package unix + +// Deprecated: Use libSystem wrappers instead of direct syscalls. +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 
165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + 
SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 
499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go new file mode 100644 index 0000000000000..a2fc91d6a8007 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -0,0 +1,439 @@ +// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build amd64,darwin + +package unix + +// Deprecated: Use libSystem wrappers instead of direct syscalls. +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + 
SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 
348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + 
SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_KQUEUE_WORKLOOP_CTL = 530 + SYS___MACH_BRIDGE_REMOTE_TIME = 531 + SYS_MAXSYSCALL = 532 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go new file mode 100644 index 0000000000000..20d7808ace3d6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go @@ -0,0 +1,437 @@ +// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm,darwin + +package unix + +// Deprecated: Use libSystem wrappers instead of direct syscalls. +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + SYS_PWRITE = 154 + 
SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + SYS_GETDIRENTRIES64 
= 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + SYS_STACK_SNAPSHOT_WITH_CONFIG = 
491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go new file mode 100644 index 0000000000000..527b9588cc969 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -0,0 +1,437 @@ +// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build arm64,darwin + +package unix + +// Deprecated: Use libSystem wrappers instead of direct syscalls. +const ( + SYS_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_CHDIR = 12 + SYS_FCHDIR = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_CHOWN = 16 + SYS_GETFSSTAT = 18 + SYS_GETPID = 20 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_GETEUID = 25 + SYS_PTRACE = 26 + SYS_RECVMSG = 27 + SYS_SENDMSG = 28 + SYS_RECVFROM = 29 + SYS_ACCEPT = 30 + SYS_GETPEERNAME = 31 + SYS_GETSOCKNAME = 32 + SYS_ACCESS = 33 + SYS_CHFLAGS = 34 + SYS_FCHFLAGS = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_GETPPID = 39 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_GETEGID = 43 + SYS_SIGACTION = 46 + SYS_GETGID = 47 + SYS_SIGPROCMASK = 48 + SYS_GETLOGIN = 49 + SYS_SETLOGIN = 50 + SYS_ACCT = 51 + SYS_SIGPENDING = 52 + SYS_SIGALTSTACK = 53 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_REVOKE = 56 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETPGID = 82 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_GETDTABLESIZE = 89 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC = 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_GETPRIORITY = 100 + SYS_BIND = 104 + SYS_SETSOCKOPT = 105 + SYS_LISTEN = 106 + SYS_SIGSUSPEND = 111 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_FLOCK = 131 + SYS_MKFIFO = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_FUTIMES = 139 + SYS_ADJTIME = 140 + SYS_GETHOSTUUID = 142 + SYS_SETSID = 147 + SYS_GETPGID = 151 + SYS_SETPRIVEXEC = 152 + SYS_PREAD = 153 + 
SYS_PWRITE = 154 + SYS_NFSSVC = 155 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UNMOUNT = 159 + SYS_GETFH = 161 + SYS_QUOTACTL = 165 + SYS_MOUNT = 167 + SYS_CSOPS = 169 + SYS_CSOPS_AUDITTOKEN = 170 + SYS_WAITID = 173 + SYS_KDEBUG_TYPEFILTER = 177 + SYS_KDEBUG_TRACE_STRING = 178 + SYS_KDEBUG_TRACE64 = 179 + SYS_KDEBUG_TRACE = 180 + SYS_SETGID = 181 + SYS_SETEGID = 182 + SYS_SETEUID = 183 + SYS_SIGRETURN = 184 + SYS_THREAD_SELFCOUNTS = 186 + SYS_FDATASYNC = 187 + SYS_STAT = 188 + SYS_FSTAT = 189 + SYS_LSTAT = 190 + SYS_PATHCONF = 191 + SYS_FPATHCONF = 192 + SYS_GETRLIMIT = 194 + SYS_SETRLIMIT = 195 + SYS_GETDIRENTRIES = 196 + SYS_MMAP = 197 + SYS_LSEEK = 199 + SYS_TRUNCATE = 200 + SYS_FTRUNCATE = 201 + SYS_SYSCTL = 202 + SYS_MLOCK = 203 + SYS_MUNLOCK = 204 + SYS_UNDELETE = 205 + SYS_OPEN_DPROTECTED_NP = 216 + SYS_GETATTRLIST = 220 + SYS_SETATTRLIST = 221 + SYS_GETDIRENTRIESATTR = 222 + SYS_EXCHANGEDATA = 223 + SYS_SEARCHFS = 225 + SYS_DELETE = 226 + SYS_COPYFILE = 227 + SYS_FGETATTRLIST = 228 + SYS_FSETATTRLIST = 229 + SYS_POLL = 230 + SYS_WATCHEVENT = 231 + SYS_WAITEVENT = 232 + SYS_MODWATCH = 233 + SYS_GETXATTR = 234 + SYS_FGETXATTR = 235 + SYS_SETXATTR = 236 + SYS_FSETXATTR = 237 + SYS_REMOVEXATTR = 238 + SYS_FREMOVEXATTR = 239 + SYS_LISTXATTR = 240 + SYS_FLISTXATTR = 241 + SYS_FSCTL = 242 + SYS_INITGROUPS = 243 + SYS_POSIX_SPAWN = 244 + SYS_FFSCTL = 245 + SYS_NFSCLNT = 247 + SYS_FHOPEN = 248 + SYS_MINHERIT = 250 + SYS_SEMSYS = 251 + SYS_MSGSYS = 252 + SYS_SHMSYS = 253 + SYS_SEMCTL = 254 + SYS_SEMGET = 255 + SYS_SEMOP = 256 + SYS_MSGCTL = 258 + SYS_MSGGET = 259 + SYS_MSGSND = 260 + SYS_MSGRCV = 261 + SYS_SHMAT = 262 + SYS_SHMCTL = 263 + SYS_SHMDT = 264 + SYS_SHMGET = 265 + SYS_SHM_OPEN = 266 + SYS_SHM_UNLINK = 267 + SYS_SEM_OPEN = 268 + SYS_SEM_CLOSE = 269 + SYS_SEM_UNLINK = 270 + SYS_SEM_WAIT = 271 + SYS_SEM_TRYWAIT = 272 + SYS_SEM_POST = 273 + SYS_SYSCTLBYNAME = 274 + SYS_OPEN_EXTENDED = 277 + SYS_UMASK_EXTENDED = 278 + SYS_STAT_EXTENDED = 279 + SYS_LSTAT_EXTENDED = 280 + SYS_FSTAT_EXTENDED = 281 + SYS_CHMOD_EXTENDED = 282 + SYS_FCHMOD_EXTENDED = 283 + SYS_ACCESS_EXTENDED = 284 + SYS_SETTID = 285 + SYS_GETTID = 286 + SYS_SETSGROUPS = 287 + SYS_GETSGROUPS = 288 + SYS_SETWGROUPS = 289 + SYS_GETWGROUPS = 290 + SYS_MKFIFO_EXTENDED = 291 + SYS_MKDIR_EXTENDED = 292 + SYS_IDENTITYSVC = 293 + SYS_SHARED_REGION_CHECK_NP = 294 + SYS_VM_PRESSURE_MONITOR = 296 + SYS_PSYNCH_RW_LONGRDLOCK = 297 + SYS_PSYNCH_RW_YIELDWRLOCK = 298 + SYS_PSYNCH_RW_DOWNGRADE = 299 + SYS_PSYNCH_RW_UPGRADE = 300 + SYS_PSYNCH_MUTEXWAIT = 301 + SYS_PSYNCH_MUTEXDROP = 302 + SYS_PSYNCH_CVBROAD = 303 + SYS_PSYNCH_CVSIGNAL = 304 + SYS_PSYNCH_CVWAIT = 305 + SYS_PSYNCH_RW_RDLOCK = 306 + SYS_PSYNCH_RW_WRLOCK = 307 + SYS_PSYNCH_RW_UNLOCK = 308 + SYS_PSYNCH_RW_UNLOCK2 = 309 + SYS_GETSID = 310 + SYS_SETTID_WITH_PID = 311 + SYS_PSYNCH_CVCLRPREPOST = 312 + SYS_AIO_FSYNC = 313 + SYS_AIO_RETURN = 314 + SYS_AIO_SUSPEND = 315 + SYS_AIO_CANCEL = 316 + SYS_AIO_ERROR = 317 + SYS_AIO_READ = 318 + SYS_AIO_WRITE = 319 + SYS_LIO_LISTIO = 320 + SYS_IOPOLICYSYS = 322 + SYS_PROCESS_POLICY = 323 + SYS_MLOCKALL = 324 + SYS_MUNLOCKALL = 325 + SYS_ISSETUGID = 327 + SYS___PTHREAD_KILL = 328 + SYS___PTHREAD_SIGMASK = 329 + SYS___SIGWAIT = 330 + SYS___DISABLE_THREADSIGNAL = 331 + SYS___PTHREAD_MARKCANCEL = 332 + SYS___PTHREAD_CANCELED = 333 + SYS___SEMWAIT_SIGNAL = 334 + SYS_PROC_INFO = 336 + SYS_SENDFILE = 337 + SYS_STAT64 = 338 + SYS_FSTAT64 = 339 + SYS_LSTAT64 = 340 + SYS_STAT64_EXTENDED = 341 + SYS_LSTAT64_EXTENDED = 342 + SYS_FSTAT64_EXTENDED = 343 + 
SYS_GETDIRENTRIES64 = 344 + SYS_STATFS64 = 345 + SYS_FSTATFS64 = 346 + SYS_GETFSSTAT64 = 347 + SYS___PTHREAD_CHDIR = 348 + SYS___PTHREAD_FCHDIR = 349 + SYS_AUDIT = 350 + SYS_AUDITON = 351 + SYS_GETAUID = 353 + SYS_SETAUID = 354 + SYS_GETAUDIT_ADDR = 357 + SYS_SETAUDIT_ADDR = 358 + SYS_AUDITCTL = 359 + SYS_BSDTHREAD_CREATE = 360 + SYS_BSDTHREAD_TERMINATE = 361 + SYS_KQUEUE = 362 + SYS_KEVENT = 363 + SYS_LCHOWN = 364 + SYS_BSDTHREAD_REGISTER = 366 + SYS_WORKQ_OPEN = 367 + SYS_WORKQ_KERNRETURN = 368 + SYS_KEVENT64 = 369 + SYS___OLD_SEMWAIT_SIGNAL = 370 + SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371 + SYS_THREAD_SELFID = 372 + SYS_LEDGER = 373 + SYS_KEVENT_QOS = 374 + SYS_KEVENT_ID = 375 + SYS___MAC_EXECVE = 380 + SYS___MAC_SYSCALL = 381 + SYS___MAC_GET_FILE = 382 + SYS___MAC_SET_FILE = 383 + SYS___MAC_GET_LINK = 384 + SYS___MAC_SET_LINK = 385 + SYS___MAC_GET_PROC = 386 + SYS___MAC_SET_PROC = 387 + SYS___MAC_GET_FD = 388 + SYS___MAC_SET_FD = 389 + SYS___MAC_GET_PID = 390 + SYS_PSELECT = 394 + SYS_PSELECT_NOCANCEL = 395 + SYS_READ_NOCANCEL = 396 + SYS_WRITE_NOCANCEL = 397 + SYS_OPEN_NOCANCEL = 398 + SYS_CLOSE_NOCANCEL = 399 + SYS_WAIT4_NOCANCEL = 400 + SYS_RECVMSG_NOCANCEL = 401 + SYS_SENDMSG_NOCANCEL = 402 + SYS_RECVFROM_NOCANCEL = 403 + SYS_ACCEPT_NOCANCEL = 404 + SYS_MSYNC_NOCANCEL = 405 + SYS_FCNTL_NOCANCEL = 406 + SYS_SELECT_NOCANCEL = 407 + SYS_FSYNC_NOCANCEL = 408 + SYS_CONNECT_NOCANCEL = 409 + SYS_SIGSUSPEND_NOCANCEL = 410 + SYS_READV_NOCANCEL = 411 + SYS_WRITEV_NOCANCEL = 412 + SYS_SENDTO_NOCANCEL = 413 + SYS_PREAD_NOCANCEL = 414 + SYS_PWRITE_NOCANCEL = 415 + SYS_WAITID_NOCANCEL = 416 + SYS_POLL_NOCANCEL = 417 + SYS_MSGSND_NOCANCEL = 418 + SYS_MSGRCV_NOCANCEL = 419 + SYS_SEM_WAIT_NOCANCEL = 420 + SYS_AIO_SUSPEND_NOCANCEL = 421 + SYS___SIGWAIT_NOCANCEL = 422 + SYS___SEMWAIT_SIGNAL_NOCANCEL = 423 + SYS___MAC_MOUNT = 424 + SYS___MAC_GET_MOUNT = 425 + SYS___MAC_GETFSSTAT = 426 + SYS_FSGETPATH = 427 + SYS_AUDIT_SESSION_SELF = 428 + SYS_AUDIT_SESSION_JOIN = 429 + SYS_FILEPORT_MAKEPORT = 430 + SYS_FILEPORT_MAKEFD = 431 + SYS_AUDIT_SESSION_PORT = 432 + SYS_PID_SUSPEND = 433 + SYS_PID_RESUME = 434 + SYS_PID_HIBERNATE = 435 + SYS_PID_SHUTDOWN_SOCKETS = 436 + SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438 + SYS_KAS_INFO = 439 + SYS_MEMORYSTATUS_CONTROL = 440 + SYS_GUARDED_OPEN_NP = 441 + SYS_GUARDED_CLOSE_NP = 442 + SYS_GUARDED_KQUEUE_NP = 443 + SYS_CHANGE_FDGUARD_NP = 444 + SYS_USRCTL = 445 + SYS_PROC_RLIMIT_CONTROL = 446 + SYS_CONNECTX = 447 + SYS_DISCONNECTX = 448 + SYS_PEELOFF = 449 + SYS_SOCKET_DELEGATE = 450 + SYS_TELEMETRY = 451 + SYS_PROC_UUID_POLICY = 452 + SYS_MEMORYSTATUS_GET_LEVEL = 453 + SYS_SYSTEM_OVERRIDE = 454 + SYS_VFS_PURGE = 455 + SYS_SFI_CTL = 456 + SYS_SFI_PIDCTL = 457 + SYS_COALITION = 458 + SYS_COALITION_INFO = 459 + SYS_NECP_MATCH_POLICY = 460 + SYS_GETATTRLISTBULK = 461 + SYS_CLONEFILEAT = 462 + SYS_OPENAT = 463 + SYS_OPENAT_NOCANCEL = 464 + SYS_RENAMEAT = 465 + SYS_FACCESSAT = 466 + SYS_FCHMODAT = 467 + SYS_FCHOWNAT = 468 + SYS_FSTATAT = 469 + SYS_FSTATAT64 = 470 + SYS_LINKAT = 471 + SYS_UNLINKAT = 472 + SYS_READLINKAT = 473 + SYS_SYMLINKAT = 474 + SYS_MKDIRAT = 475 + SYS_GETATTRLISTAT = 476 + SYS_PROC_TRACE_LOG = 477 + SYS_BSDTHREAD_CTL = 478 + SYS_OPENBYID_NP = 479 + SYS_RECVMSG_X = 480 + SYS_SENDMSG_X = 481 + SYS_THREAD_SELFUSAGE = 482 + SYS_CSRCTL = 483 + SYS_GUARDED_OPEN_DPROTECTED_NP = 484 + SYS_GUARDED_WRITE_NP = 485 + SYS_GUARDED_PWRITE_NP = 486 + SYS_GUARDED_WRITEV_NP = 487 + SYS_RENAMEATX_NP = 488 + SYS_MREMAP_ENCRYPTED = 489 + SYS_NETAGENT_TRIGGER = 490 + 
SYS_STACK_SNAPSHOT_WITH_CONFIG = 491 + SYS_MICROSTACKSHOT = 492 + SYS_GRAB_PGO_DATA = 493 + SYS_PERSONA = 494 + SYS_WORK_INTERVAL_CTL = 499 + SYS_GETENTROPY = 500 + SYS_NECP_OPEN = 501 + SYS_NECP_CLIENT_ACTION = 502 + SYS___NEXUS_OPEN = 503 + SYS___NEXUS_REGISTER = 504 + SYS___NEXUS_DEREGISTER = 505 + SYS___NEXUS_CREATE = 506 + SYS___NEXUS_DESTROY = 507 + SYS___NEXUS_GET_OPT = 508 + SYS___NEXUS_SET_OPT = 509 + SYS___CHANNEL_OPEN = 510 + SYS___CHANNEL_GET_INFO = 511 + SYS___CHANNEL_SYNC = 512 + SYS___CHANNEL_GET_OPT = 513 + SYS___CHANNEL_SET_OPT = 514 + SYS_ULOCK_WAIT = 515 + SYS_ULOCK_WAKE = 516 + SYS_FCLONEFILEAT = 517 + SYS_FS_SNAPSHOT = 518 + SYS_TERMINATE_WITH_PAYLOAD = 520 + SYS_ABORT_WITH_PAYLOAD = 521 + SYS_NECP_SESSION_OPEN = 522 + SYS_NECP_SESSION_ACTION = 523 + SYS_SETATTRLISTAT = 524 + SYS_NET_QOS_GUIDELINE = 525 + SYS_FMOUNT = 526 + SYS_NTP_ADJTIME = 527 + SYS_NTP_GETTIME = 528 + SYS_OS_FAULT_WITH_PAYLOAD = 529 + SYS_MAXSYSCALL = 530 + SYS_INVALID = 63 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index a597e061caed0..0f5a3f6970a26 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -431,6 +431,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8c102e55a1dfd..36d5219ef824f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -353,6 +353,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 98f9b68fb9055..3622ba14b4e11 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -395,6 +395,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 4dabc33fbc9f6..6193c3dc07c14 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -298,6 +298,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d5724e5964d72..640b974345f31 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -416,6 +416,7 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_CLOSE_RANGE = 4436 SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index c1d824a4f3d24..3467fbb5ff1c4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -346,6 +346,7 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 
= 5435 + SYS_CLOSE_RANGE = 5436 SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 598dd5d6375a4..0fc38d5a72f02 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -346,6 +346,7 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index c36782d08e270..999fd55bccb70 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -416,6 +416,7 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_CLOSE_RANGE = 4436 SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 9287538d36866..1df0d799355df 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -395,6 +395,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4dafad8352ada..4db39cca4da51 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -395,6 +395,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 6642cfccdf160..e6927401446f7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -297,6 +297,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 23367b9467f02..a585aec4e7971 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -360,6 +360,7 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 083aa0204ef8e..d047e567afc60 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -374,6 +374,7 @@ const ( SYS_FSMOUNT = 432 SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 + SYS_CLOSE_RANGE = 436 SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index dd56ab84a2267..830fbb35c0a4c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -194,6 +194,15 
@@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -258,6 +267,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -498,3 +508,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 1f82f2b51ac78..e53a7c49ffe8d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -199,6 +199,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -263,6 +272,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -503,3 +513,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 3af01a49f2aba..98be973ef94b0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -194,6 +194,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -258,6 +267,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -498,3 +508,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index ff437386b68bd..ddae5afe1ba6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -199,6 +199,15 @@ type RawSockaddrAny struct { Pad [92]int8 } +type RawSockaddrCtl struct { + Sc_len uint8 + Sc_family uint8 + Ss_sysaddr uint16 + Sc_id uint32 + Sc_unit uint32 + Sc_reserved [5]uint32 +} + type _Socklen uint32 type Linger struct { @@ -263,6 +272,7 @@ const ( SizeofSockaddrAny = 0x6c SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 @@ -503,3 +513,8 @@ type Clockinfo struct { Stathz int32 Profhz int32 } + +type CtlInfo struct { + Id uint32 + Name [96]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8d1429a0ecb78..504ef131fb8cf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -462,166 +462,107 @@ const ( ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX 
= 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x36 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + 
NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc ) type NlMsghdr struct { @@ -946,7 +887,10 @@ type PerfEventMmapPage struct { Time_offset uint64 Time_zero uint64 Size uint32 - _ [948]uint8 + _ uint32 + Time_cycles uint64 + Time_mask uint64 + _ [928]uint8 Data_head uint64 Data_tail uint64 Data_offset uint64 @@ -1145,10 +1089,12 @@ const ( PERF_RECORD_KSYMBOL = 0x11 PERF_RECORD_BPF_EVENT = 0x12 PERF_RECORD_CGROUP = 0x13 - PERF_RECORD_MAX = 0x14 + PERF_RECORD_TEXT_POKE = 0x14 + PERF_RECORD_MAX = 0x15 PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0x0 PERF_RECORD_KSYMBOL_TYPE_BPF = 0x1 - PERF_RECORD_KSYMBOL_TYPE_MAX = 0x2 + PERF_RECORD_KSYMBOL_TYPE_OOL = 0x2 + PERF_RECORD_KSYMBOL_TYPE_MAX = 0x3 PERF_BPF_EVENT_UNKNOWN = 0x0 PERF_BPF_EVENT_PROG_LOAD = 0x1 PERF_BPF_EVENT_PROG_UNLOAD = 0x2 @@ -1378,6 +1324,394 @@ const ( SizeofTpacketStatsV3 = 0xc ) +const ( + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 
+ IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_PROP_LIST = 0x34 + IFLA_ALT_IFNAME = 0x35 + IFLA_PERM_ADDRESS = 0x36 + IFLA_INET_UNSPEC = 0x0 + IFLA_INET_CONF = 0x1 + IFLA_INET6_UNSPEC = 0x0 + IFLA_INET6_FLAGS = 0x1 + IFLA_INET6_CONF = 0x2 + IFLA_INET6_STATS = 0x3 + IFLA_INET6_MCAST = 0x4 + IFLA_INET6_CACHEINFO = 0x5 + IFLA_INET6_ICMP6STATS = 0x6 + IFLA_INET6_TOKEN = 0x7 + IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_BR_UNSPEC = 0x0 + IFLA_BR_FORWARD_DELAY = 0x1 + IFLA_BR_HELLO_TIME = 0x2 + IFLA_BR_MAX_AGE = 0x3 + IFLA_BR_AGEING_TIME = 0x4 + IFLA_BR_STP_STATE = 0x5 + IFLA_BR_PRIORITY = 0x6 + IFLA_BR_VLAN_FILTERING = 0x7 + IFLA_BR_VLAN_PROTOCOL = 0x8 + IFLA_BR_GROUP_FWD_MASK = 0x9 + IFLA_BR_ROOT_ID = 0xa + IFLA_BR_BRIDGE_ID = 0xb + IFLA_BR_ROOT_PORT = 0xc + IFLA_BR_ROOT_PATH_COST = 0xd + IFLA_BR_TOPOLOGY_CHANGE = 0xe + IFLA_BR_TOPOLOGY_CHANGE_DETECTED = 0xf + IFLA_BR_HELLO_TIMER = 0x10 + IFLA_BR_TCN_TIMER = 0x11 + IFLA_BR_TOPOLOGY_CHANGE_TIMER = 0x12 + IFLA_BR_GC_TIMER = 0x13 + IFLA_BR_GROUP_ADDR = 0x14 + IFLA_BR_FDB_FLUSH = 0x15 + IFLA_BR_MCAST_ROUTER = 0x16 + IFLA_BR_MCAST_SNOOPING = 0x17 + IFLA_BR_MCAST_QUERY_USE_IFADDR = 0x18 + IFLA_BR_MCAST_QUERIER = 0x19 + IFLA_BR_MCAST_HASH_ELASTICITY = 0x1a + IFLA_BR_MCAST_HASH_MAX = 0x1b + IFLA_BR_MCAST_LAST_MEMBER_CNT = 0x1c + IFLA_BR_MCAST_STARTUP_QUERY_CNT = 0x1d + IFLA_BR_MCAST_LAST_MEMBER_INTVL = 0x1e + IFLA_BR_MCAST_MEMBERSHIP_INTVL = 0x1f + IFLA_BR_MCAST_QUERIER_INTVL = 0x20 + IFLA_BR_MCAST_QUERY_INTVL = 0x21 + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL = 0x22 + IFLA_BR_MCAST_STARTUP_QUERY_INTVL = 0x23 + IFLA_BR_NF_CALL_IPTABLES = 0x24 + IFLA_BR_NF_CALL_IP6TABLES = 0x25 + IFLA_BR_NF_CALL_ARPTABLES = 0x26 + IFLA_BR_VLAN_DEFAULT_PVID = 0x27 + IFLA_BR_PAD = 0x28 + IFLA_BR_VLAN_STATS_ENABLED = 0x29 + IFLA_BR_MCAST_STATS_ENABLED = 0x2a + IFLA_BR_MCAST_IGMP_VERSION = 0x2b + IFLA_BR_MCAST_MLD_VERSION = 0x2c + IFLA_BR_VLAN_STATS_PER_PORT = 0x2d + IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BRPORT_UNSPEC = 0x0 + IFLA_BRPORT_STATE = 0x1 + IFLA_BRPORT_PRIORITY = 0x2 + IFLA_BRPORT_COST = 0x3 + IFLA_BRPORT_MODE = 0x4 + IFLA_BRPORT_GUARD = 0x5 + IFLA_BRPORT_PROTECT = 0x6 + IFLA_BRPORT_FAST_LEAVE = 0x7 + IFLA_BRPORT_LEARNING = 0x8 + IFLA_BRPORT_UNICAST_FLOOD = 0x9 + IFLA_BRPORT_PROXYARP = 0xa + IFLA_BRPORT_LEARNING_SYNC = 0xb + IFLA_BRPORT_PROXYARP_WIFI = 0xc + IFLA_BRPORT_ROOT_ID = 0xd + IFLA_BRPORT_BRIDGE_ID = 0xe + IFLA_BRPORT_DESIGNATED_PORT = 0xf + IFLA_BRPORT_DESIGNATED_COST = 0x10 + IFLA_BRPORT_ID = 0x11 + IFLA_BRPORT_NO = 0x12 + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 0x13 + IFLA_BRPORT_CONFIG_PENDING = 0x14 + IFLA_BRPORT_MESSAGE_AGE_TIMER = 0x15 + IFLA_BRPORT_FORWARD_DELAY_TIMER = 0x16 + IFLA_BRPORT_HOLD_TIMER = 0x17 + IFLA_BRPORT_FLUSH = 0x18 + IFLA_BRPORT_MULTICAST_ROUTER = 0x19 + IFLA_BRPORT_PAD = 0x1a + IFLA_BRPORT_MCAST_FLOOD = 0x1b + IFLA_BRPORT_MCAST_TO_UCAST = 0x1c + IFLA_BRPORT_VLAN_TUNNEL = 0x1d + IFLA_BRPORT_BCAST_FLOOD = 0x1e + IFLA_BRPORT_GROUP_FWD_MASK = 0x1f + IFLA_BRPORT_NEIGH_SUPPRESS = 0x20 + IFLA_BRPORT_ISOLATED = 0x21 + IFLA_BRPORT_BACKUP_PORT = 0x22 + IFLA_BRPORT_MRP_RING_OPEN = 0x23 + IFLA_INFO_UNSPEC = 0x0 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + IFLA_VLAN_UNSPEC 
= 0x0 + IFLA_VLAN_ID = 0x1 + IFLA_VLAN_FLAGS = 0x2 + IFLA_VLAN_EGRESS_QOS = 0x3 + IFLA_VLAN_INGRESS_QOS = 0x4 + IFLA_VLAN_PROTOCOL = 0x5 + IFLA_VLAN_QOS_UNSPEC = 0x0 + IFLA_VLAN_QOS_MAPPING = 0x1 + IFLA_MACVLAN_UNSPEC = 0x0 + IFLA_MACVLAN_MODE = 0x1 + IFLA_MACVLAN_FLAGS = 0x2 + IFLA_MACVLAN_MACADDR_MODE = 0x3 + IFLA_MACVLAN_MACADDR = 0x4 + IFLA_MACVLAN_MACADDR_DATA = 0x5 + IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_VRF_UNSPEC = 0x0 + IFLA_VRF_TABLE = 0x1 + IFLA_VRF_PORT_UNSPEC = 0x0 + IFLA_VRF_PORT_TABLE = 0x1 + IFLA_MACSEC_UNSPEC = 0x0 + IFLA_MACSEC_SCI = 0x1 + IFLA_MACSEC_PORT = 0x2 + IFLA_MACSEC_ICV_LEN = 0x3 + IFLA_MACSEC_CIPHER_SUITE = 0x4 + IFLA_MACSEC_WINDOW = 0x5 + IFLA_MACSEC_ENCODING_SA = 0x6 + IFLA_MACSEC_ENCRYPT = 0x7 + IFLA_MACSEC_PROTECT = 0x8 + IFLA_MACSEC_INC_SCI = 0x9 + IFLA_MACSEC_ES = 0xa + IFLA_MACSEC_SCB = 0xb + IFLA_MACSEC_REPLAY_PROTECT = 0xc + IFLA_MACSEC_VALIDATION = 0xd + IFLA_MACSEC_PAD = 0xe + IFLA_MACSEC_OFFLOAD = 0xf + IFLA_XFRM_UNSPEC = 0x0 + IFLA_XFRM_LINK = 0x1 + IFLA_XFRM_IF_ID = 0x2 + IFLA_IPVLAN_UNSPEC = 0x0 + IFLA_IPVLAN_MODE = 0x1 + IFLA_IPVLAN_FLAGS = 0x2 + IFLA_VXLAN_UNSPEC = 0x0 + IFLA_VXLAN_ID = 0x1 + IFLA_VXLAN_GROUP = 0x2 + IFLA_VXLAN_LINK = 0x3 + IFLA_VXLAN_LOCAL = 0x4 + IFLA_VXLAN_TTL = 0x5 + IFLA_VXLAN_TOS = 0x6 + IFLA_VXLAN_LEARNING = 0x7 + IFLA_VXLAN_AGEING = 0x8 + IFLA_VXLAN_LIMIT = 0x9 + IFLA_VXLAN_PORT_RANGE = 0xa + IFLA_VXLAN_PROXY = 0xb + IFLA_VXLAN_RSC = 0xc + IFLA_VXLAN_L2MISS = 0xd + IFLA_VXLAN_L3MISS = 0xe + IFLA_VXLAN_PORT = 0xf + IFLA_VXLAN_GROUP6 = 0x10 + IFLA_VXLAN_LOCAL6 = 0x11 + IFLA_VXLAN_UDP_CSUM = 0x12 + IFLA_VXLAN_UDP_ZERO_CSUM6_TX = 0x13 + IFLA_VXLAN_UDP_ZERO_CSUM6_RX = 0x14 + IFLA_VXLAN_REMCSUM_TX = 0x15 + IFLA_VXLAN_REMCSUM_RX = 0x16 + IFLA_VXLAN_GBP = 0x17 + IFLA_VXLAN_REMCSUM_NOPARTIAL = 0x18 + IFLA_VXLAN_COLLECT_METADATA = 0x19 + IFLA_VXLAN_LABEL = 0x1a + IFLA_VXLAN_GPE = 0x1b + IFLA_VXLAN_TTL_INHERIT = 0x1c + IFLA_VXLAN_DF = 0x1d + IFLA_GENEVE_UNSPEC = 0x0 + IFLA_GENEVE_ID = 0x1 + IFLA_GENEVE_REMOTE = 0x2 + IFLA_GENEVE_TTL = 0x3 + IFLA_GENEVE_TOS = 0x4 + IFLA_GENEVE_PORT = 0x5 + IFLA_GENEVE_COLLECT_METADATA = 0x6 + IFLA_GENEVE_REMOTE6 = 0x7 + IFLA_GENEVE_UDP_CSUM = 0x8 + IFLA_GENEVE_UDP_ZERO_CSUM6_TX = 0x9 + IFLA_GENEVE_UDP_ZERO_CSUM6_RX = 0xa + IFLA_GENEVE_LABEL = 0xb + IFLA_GENEVE_TTL_INHERIT = 0xc + IFLA_GENEVE_DF = 0xd + IFLA_BAREUDP_UNSPEC = 0x0 + IFLA_BAREUDP_PORT = 0x1 + IFLA_BAREUDP_ETHERTYPE = 0x2 + IFLA_BAREUDP_SRCPORT_MIN = 0x3 + IFLA_BAREUDP_MULTIPROTO_MODE = 0x4 + IFLA_PPP_UNSPEC = 0x0 + IFLA_PPP_DEV_FD = 0x1 + IFLA_GTP_UNSPEC = 0x0 + IFLA_GTP_FD0 = 0x1 + IFLA_GTP_FD1 = 0x2 + IFLA_GTP_PDP_HASHSIZE = 0x3 + IFLA_GTP_ROLE = 0x4 + IFLA_BOND_UNSPEC = 0x0 + IFLA_BOND_MODE = 0x1 + IFLA_BOND_ACTIVE_SLAVE = 0x2 + IFLA_BOND_MIIMON = 0x3 + IFLA_BOND_UPDELAY = 0x4 + IFLA_BOND_DOWNDELAY = 0x5 + IFLA_BOND_USE_CARRIER = 0x6 + IFLA_BOND_ARP_INTERVAL = 0x7 + IFLA_BOND_ARP_IP_TARGET = 0x8 + IFLA_BOND_ARP_VALIDATE = 0x9 + IFLA_BOND_ARP_ALL_TARGETS = 0xa + IFLA_BOND_PRIMARY = 0xb + IFLA_BOND_PRIMARY_RESELECT = 0xc + IFLA_BOND_FAIL_OVER_MAC = 0xd + IFLA_BOND_XMIT_HASH_POLICY = 0xe + IFLA_BOND_RESEND_IGMP = 0xf + IFLA_BOND_NUM_PEER_NOTIF = 0x10 + IFLA_BOND_ALL_SLAVES_ACTIVE = 0x11 + IFLA_BOND_MIN_LINKS = 0x12 + IFLA_BOND_LP_INTERVAL = 0x13 + IFLA_BOND_PACKETS_PER_SLAVE = 0x14 + IFLA_BOND_AD_LACP_RATE = 0x15 + IFLA_BOND_AD_SELECT = 0x16 + IFLA_BOND_AD_INFO = 0x17 + IFLA_BOND_AD_ACTOR_SYS_PRIO = 0x18 + IFLA_BOND_AD_USER_PORT_KEY = 0x19 + IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a + IFLA_BOND_TLB_DYNAMIC_LB = 0x1b + 
IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_INFO_UNSPEC = 0x0 + IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 + IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 + IFLA_BOND_AD_INFO_ACTOR_KEY = 0x3 + IFLA_BOND_AD_INFO_PARTNER_KEY = 0x4 + IFLA_BOND_AD_INFO_PARTNER_MAC = 0x5 + IFLA_BOND_SLAVE_UNSPEC = 0x0 + IFLA_BOND_SLAVE_STATE = 0x1 + IFLA_BOND_SLAVE_MII_STATUS = 0x2 + IFLA_BOND_SLAVE_LINK_FAILURE_COUNT = 0x3 + IFLA_BOND_SLAVE_PERM_HWADDR = 0x4 + IFLA_BOND_SLAVE_QUEUE_ID = 0x5 + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_VF_INFO_UNSPEC = 0x0 + IFLA_VF_INFO = 0x1 + IFLA_VF_UNSPEC = 0x0 + IFLA_VF_MAC = 0x1 + IFLA_VF_VLAN = 0x2 + IFLA_VF_TX_RATE = 0x3 + IFLA_VF_SPOOFCHK = 0x4 + IFLA_VF_LINK_STATE = 0x5 + IFLA_VF_RATE = 0x6 + IFLA_VF_RSS_QUERY_EN = 0x7 + IFLA_VF_STATS = 0x8 + IFLA_VF_TRUST = 0x9 + IFLA_VF_IB_NODE_GUID = 0xa + IFLA_VF_IB_PORT_GUID = 0xb + IFLA_VF_VLAN_LIST = 0xc + IFLA_VF_BROADCAST = 0xd + IFLA_VF_VLAN_INFO_UNSPEC = 0x0 + IFLA_VF_VLAN_INFO = 0x1 + IFLA_VF_LINK_STATE_AUTO = 0x0 + IFLA_VF_LINK_STATE_ENABLE = 0x1 + IFLA_VF_LINK_STATE_DISABLE = 0x2 + IFLA_VF_STATS_RX_PACKETS = 0x0 + IFLA_VF_STATS_TX_PACKETS = 0x1 + IFLA_VF_STATS_RX_BYTES = 0x2 + IFLA_VF_STATS_TX_BYTES = 0x3 + IFLA_VF_STATS_BROADCAST = 0x4 + IFLA_VF_STATS_MULTICAST = 0x5 + IFLA_VF_STATS_PAD = 0x6 + IFLA_VF_STATS_RX_DROPPED = 0x7 + IFLA_VF_STATS_TX_DROPPED = 0x8 + IFLA_VF_PORT_UNSPEC = 0x0 + IFLA_VF_PORT = 0x1 + IFLA_PORT_UNSPEC = 0x0 + IFLA_PORT_VF = 0x1 + IFLA_PORT_PROFILE = 0x2 + IFLA_PORT_VSI_TYPE = 0x3 + IFLA_PORT_INSTANCE_UUID = 0x4 + IFLA_PORT_HOST_UUID = 0x5 + IFLA_PORT_REQUEST = 0x6 + IFLA_PORT_RESPONSE = 0x7 + IFLA_IPOIB_UNSPEC = 0x0 + IFLA_IPOIB_PKEY = 0x1 + IFLA_IPOIB_MODE = 0x2 + IFLA_IPOIB_UMCAST = 0x3 + IFLA_HSR_UNSPEC = 0x0 + IFLA_HSR_SLAVE1 = 0x1 + IFLA_HSR_SLAVE2 = 0x2 + IFLA_HSR_MULTICAST_SPEC = 0x3 + IFLA_HSR_SUPERVISION_ADDR = 0x4 + IFLA_HSR_SEQ_NR = 0x5 + IFLA_HSR_VERSION = 0x6 + IFLA_STATS_UNSPEC = 0x0 + IFLA_STATS_LINK_64 = 0x1 + IFLA_STATS_LINK_XSTATS = 0x2 + IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 + IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 + IFLA_STATS_AF_SPEC = 0x5 + IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_XDP_UNSPEC = 0x0 + IFLA_XDP_FD = 0x1 + IFLA_XDP_ATTACHED = 0x2 + IFLA_XDP_FLAGS = 0x3 + IFLA_XDP_PROG_ID = 0x4 + IFLA_XDP_DRV_PROG_ID = 0x5 + IFLA_XDP_SKB_PROG_ID = 0x6 + IFLA_XDP_HW_PROG_ID = 0x7 + IFLA_XDP_EXPECTED_FD = 0x8 + IFLA_EVENT_NONE = 0x0 + IFLA_EVENT_REBOOT = 0x1 + IFLA_EVENT_FEATURES = 0x2 + IFLA_EVENT_BONDING_FAILOVER = 0x3 + IFLA_EVENT_NOTIFY_PEERS = 0x4 + IFLA_EVENT_IGMP_RESEND = 0x5 + IFLA_EVENT_BONDING_OPTIONS = 0x6 + IFLA_TUN_UNSPEC = 0x0 + IFLA_TUN_OWNER = 0x1 + IFLA_TUN_GROUP = 0x2 + IFLA_TUN_TYPE = 0x3 + IFLA_TUN_PI = 0x4 + IFLA_TUN_VNET_HDR = 0x5 + IFLA_TUN_PERSIST = 0x6 + IFLA_TUN_MULTI_QUEUE = 0x7 + IFLA_TUN_NUM_QUEUES = 0x8 + IFLA_TUN_NUM_DISABLED_QUEUES = 0x9 + IFLA_RMNET_UNSPEC = 0x0 + IFLA_RMNET_MUX_ID = 0x1 + IFLA_RMNET_FLAGS = 0x2 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -1465,7 +1799,7 @@ const ( NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 NFTA_HOOK_HOOKNUM = 0x1 @@ -1904,9 +2238,12 @@ type XDPMmapOffsets struct { } type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 + Rx_ring_full uint64 + 
Rx_fill_ring_empty_descs uint64 + Tx_ring_empty_descs uint64 } type XDPDesc struct { @@ -2553,7 +2890,7 @@ const ( DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c DEVLINK_ATTR_PAD = 0x3d DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e - DEVLINK_ATTR_MAX = 0x90 + DEVLINK_ATTR_MAX = 0x94 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -2626,3 +2963,39 @@ type WatchdogInfo struct { Version uint32 Identity [32]uint8 } + +type PPSFData struct { + Info PPSKInfo + Timeout PPSKTime +} + +type PPSKParams struct { + Api_version int32 + Mode int32 + Assert_off_tu PPSKTime + Clear_off_tu PPSKTime +} + +type PPSKTime struct { + Sec int64 + Nsec int32 + Flags uint32 +} + +const ( + LWTUNNEL_ENCAP_NONE = 0x0 + LWTUNNEL_ENCAP_MPLS = 0x1 + LWTUNNEL_ENCAP_IP = 0x2 + LWTUNNEL_ENCAP_ILA = 0x3 + LWTUNNEL_ENCAP_IP6 = 0x4 + LWTUNNEL_ENCAP_SEG6 = 0x5 + LWTUNNEL_ENCAP_BPF = 0x6 + LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 + LWTUNNEL_ENCAP_RPL = 0x8 + LWTUNNEL_ENCAP_MAX = 0x8 + + MPLS_IPTUNNEL_UNSPEC = 0x0 + MPLS_IPTUNNEL_DST = 0x1 + MPLS_IPTUNNEL_TTL = 0x2 + MPLS_IPTUNNEL_MAX = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 73509d896a2ac..d54618aa61f58 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -602,3 +602,18 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 +} + +const ( + PPS_GETPARAMS = 0x800470a1 + PPS_SETPARAMS = 0x400470a2 + PPS_GETCAP = 0x800470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 45eb8738b0df1..741d25be95742 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -619,3 +619,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 8f6b453aba5b1..e8d982c3df7ba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -596,3 +596,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800470a1 + PPS_SETPARAMS = 0x400470a2 + PPS_GETCAP = 0x800470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index b1e0c24f192fd..311cf2155d577 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -598,3 +598,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + 
PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index fb802c3ec9b37..1312bdf77feac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -602,3 +602,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400470a1 + PPS_SETPARAMS = 0x800470a2 + PPS_GETCAP = 0x400470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 30abcf3bb8e30..2a993481950c8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -601,3 +601,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 99761aa9a78ac..f964307b293c9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -601,3 +601,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 293690348f6e2..ca0fab27020bd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -602,3 +602,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400470a1 + PPS_SETPARAMS = 0x800470a2 + PPS_GETCAP = 0x400470a3 + PPS_FETCH = 0xc00470a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 0ca856e559b6c..257e0042473c2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -608,3 +608,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index f50f6482eee7d..980dd31736a6f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -608,3 +608,19 @@ type 
TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 4d3ac8d7b4097..d9fdab20b83da 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -626,3 +626,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 349f483a80ea6..c25de8c679cda 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -622,3 +622,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 80c73beaa1556..97fca65340e65 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -603,3 +603,19 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x400870a1 + PPS_SETPARAMS = 0x800870a2 + PPS_GETCAP = 0x400870a3 + PPS_FETCH = 0xc00870a4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 23ed9fe51d4cf..db817f3ba828c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -88,7 +88,6 @@ type Stat_t struct { Mtim Timespec Ctim Timespec Blksize int32 - _ [4]byte Blocks int64 Fstype [16]int8 } @@ -96,7 +95,6 @@ type Stat_t struct { type Flock_t struct { Type int16 Whence int16 - _ [4]byte Start int64 Len int64 Sysid int32 @@ -138,12 +136,12 @@ type RawSockaddrInet4 struct { } type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 - X__sin6_src_id uint32 + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + _ uint32 } type RawSockaddrUnix struct { @@ -196,10 +194,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Accrights *int8 Accrightslen int32 _ [4]byte @@ -228,7 +224,7 @@ type IPv6MTUInfo struct { } type ICMPv6Filter struct { - X__icmp6_filt [8]uint32 + Filt [8]uint32 } const ( @@ -291,7 +287,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -299,7 +294,6 @@ type 
IfData struct { Type uint8 Addrlen uint8 Hdrlen uint8 - _ [1]byte Mtu uint32 Metric uint32 Baudrate uint32 @@ -324,7 +318,6 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Metric int32 } @@ -333,7 +326,6 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte Flags int32 Addrs int32 Pid int32 @@ -371,15 +363,14 @@ type BpfVersion struct { } type BpfStat struct { - Recv uint64 - Drop uint64 - Capt uint64 - Padding [13]uint64 + Recv uint64 + Drop uint64 + Capt uint64 + _ [13]uint64 } type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 82076fb74ff94..9cd147b7e3ffd 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -32,6 +32,8 @@ type DLLError struct { func (e *DLLError) Error() string { return e.Msg } +func (e *DLLError) Unwrap() error { return e.Err } + // A DLL implements access to a single DLL. type DLL struct { Name string diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go index e409d76f0fde7..1adb60739a34b 100644 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -16,13 +16,19 @@ const ( MEM_RESET_UNDO = 0x01000000 MEM_LARGE_PAGES = 0x20000000 - PAGE_NOACCESS = 0x01 - PAGE_READONLY = 0x02 - PAGE_READWRITE = 0x04 - PAGE_WRITECOPY = 0x08 - PAGE_EXECUTE_READ = 0x20 - PAGE_EXECUTE_READWRITE = 0x40 - PAGE_EXECUTE_WRITECOPY = 0x80 + PAGE_NOACCESS = 0x00000001 + PAGE_READONLY = 0x00000002 + PAGE_READWRITE = 0x00000004 + PAGE_WRITECOPY = 0x00000008 + PAGE_EXECUTE = 0x00000010 + PAGE_EXECUTE_READ = 0x00000020 + PAGE_EXECUTE_READWRITE = 0x00000040 + PAGE_EXECUTE_WRITECOPY = 0x00000080 + PAGE_GUARD = 0x00000100 + PAGE_NOCACHE = 0x00000200 + PAGE_WRITECOMBINE = 0x00000400 + PAGE_TARGETS_INVALID = 0x40000000 + PAGE_TARGETS_NO_UPDATE = 0x40000000 QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index 3778075da0f54..fc1835d8a233a 100644 --- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -19,6 +19,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +27,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -40,50 +41,50 @@ var ( modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") - procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") - procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") - procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegSetValueExW = 
modadvapi32.NewProc("RegSetValueExW") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") ) -func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } return } -func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) if r0 != 0 { regerrno = syscall.Errno(r0) } return } -func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) if r0 != 0 { regerrno = syscall.Errno(r0) } return } -func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) if r0 != 0 { regerrno = syscall.Errno(r0) } return } -func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), 
uintptr(unsafe.Pointer(buflen)), 0) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -98,8 +99,8 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint return } -func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -110,11 +111,7 @@ func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 9e3c44a855708..14906485f3a1f 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1103,9 +1103,10 @@ type OBJECTS_AND_NAME struct { } //sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo -//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetSecurityInfo //sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW //sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW +//sys SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) = advapi32.SetKernelObjectSecurity //sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW //sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go new file mode 100644 index 0000000000000..1681810e04888 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go @@ -0,0 +1,100 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 
0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno 
= ERROR_NO_SUCH_DEVICE_INTERFACE +) diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index af828a91bcf3f..6122f557a097a 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -25,17 +25,20 @@ package windows // import "golang.org/x/sys/windows" import ( + "bytes" + "strings" "syscall" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any // location, it returns (nil, syscall.EINVAL). func ByteSliceFromString(s string) ([]byte, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } + if strings.IndexByte(s, 0) != -1 { + return nil, syscall.EINVAL } a := make([]byte, len(s)+1) copy(a, s) @@ -53,6 +56,41 @@ func BytePtrFromString(s string) (*byte, error) { return &a[0], nil } +// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any +// bytes after the NUL removed. +func ByteSliceToString(s []byte) string { + if i := bytes.IndexByte(s, 0); i != -1 { + s = s[:i] + } + return string(s) +} + +// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. +// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated +// at a zero byte; if the zero byte is not present, the program may crash. +func BytePtrToString(p *byte) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + 1) + } + + var s []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + h.Data = unsafe.Pointer(p) + h.Len = n + h.Cap = n + + return string(s) +} + // Single-word zero for use when we need a valid pointer to 0 bytes. // See mksyscall.pl. var _zero uintptr diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 65ca6d20f03e4..86a46f7713a17 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -92,11 +92,11 @@ func UTF16FromString(s string) ([]uint16, error) { } // UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, -// with a terminating NUL removed. +// with a terminating NUL and any bytes after the NUL removed. func UTF16ToString(s []uint16) string { for i, v := range s { if v == 0 { - s = s[0:i] + s = s[:i] break } } @@ -120,7 +120,7 @@ func UTF16PtrFromString(s string) (*uint16, error) { } // UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string. -// If the pointer is nil, this returns the empty string. This assumes that the UTF-16 sequence is terminated +// If the pointer is nil, it returns the empty string. It assumes that the UTF-16 sequence is terminated // at a zero word; if the zero word is not present, the program may crash. 
func UTF16PtrToString(p *uint16) string { if p == nil { @@ -174,6 +174,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) //sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process +//sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) @@ -187,6 +188,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FindClose(handle Handle) (err error) //sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) //sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) +//sys SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) //sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW //sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW //sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW @@ -243,6 +245,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW //sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW //sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW //sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW //sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) //sys UnmapViewOfFile(addr uintptr) (err error) @@ -259,6 +262,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore //sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore //sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore //sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain //sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain //sys CertCreateCertificateContext(certEncodingType uint32, certEncoded 
*byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext @@ -274,7 +278,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo -//sys SetConsoleCursorPosition(console Handle, position Coord) (err error) = kernel32.SetConsoleCursorPosition +//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot @@ -390,11 +394,7 @@ func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) proc = uintptr(r0) if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -1091,11 +1091,7 @@ func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlap } r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return err } @@ -1107,11 +1103,7 @@ func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overl } r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return err } @@ -1490,3 +1482,7 @@ func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf return languages, nil } } + +func SetConsoleCursorPosition(console Handle, position Coord) error { + return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index da1652e74b013..e7ae37f8848bb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1772,3 +1772,32 @@ const ( MUI_LANGUAGE_INSTALLED = 0x20 MUI_LANGUAGE_LICENSED = 0x40 ) + +// FILE_INFO_BY_HANDLE_CLASS constants for SetFileInformationByHandle/GetFileInformationByHandleEx +const ( + FileBasicInfo = 0 + FileStandardInfo = 1 + FileNameInfo = 2 + FileRenameInfo = 3 + FileDispositionInfo = 4 + FileAllocationInfo = 5 + FileEndOfFileInfo = 6 + FileStreamInfo = 7 + FileCompressionInfo = 8 + FileAttributeTagInfo = 9 + FileIdBothDirectoryInfo = 10 + FileIdBothDirectoryRestartInfo = 11 + FileIoPriorityHintInfo = 12 + FileRemoteProtocolInfo = 13 + FileFullDirectoryInfo = 14 + 
FileFullDirectoryRestartInfo = 15 + FileStorageInfo = 16 + FileAlignmentInfo = 17 + FileIdInfo = 18 + FileIdExtdDirectoryInfo = 19 + FileIdExtdDirectoryRestartInfo = 20 + FileDispositionInfoEx = 21 + FileRenameInfoEx = 22 + FileCaseSensitiveInfo = 23 + FileNormalizedNameInfo = 24 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9c048fd7dd7b6..8fbef7da66983 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -17,6 +17,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -24,7 +25,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -36,2076 +37,1619 @@ func errnoErr(e syscall.Errno) error { var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") - modshell32 = NewLazySystemDLL("shell32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") modmswsock = NewLazySystemDLL("mswsock.dll") - modcrypt32 = NewLazySystemDLL("crypt32.dll") - moduser32 = NewLazySystemDLL("user32.dll") - modole32 = NewLazySystemDLL("ole32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") modntdll = NewLazySystemDLL("ntdll.dll") + modole32 = NewLazySystemDLL("ole32.dll") modpsapi = NewLazySystemDLL("psapi.dll") - modws2_32 = NewLazySystemDLL("ws2_32.dll") - moddnsapi = NewLazySystemDLL("dnsapi.dll") - modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modsecur32 = NewLazySystemDLL("secur32.dll") - modnetapi32 = NewLazySystemDLL("netapi32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") - procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") - procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") - procReportEventW = modadvapi32.NewProc("ReportEventW") - procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procControlService = modadvapi32.NewProc("ControlService") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procCopySid = modadvapi32.NewProc("CopySid") 
procCreateServiceW = modadvapi32.NewProc("CreateServiceW") - procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") procDeleteService = modadvapi32.NewProc("DeleteService") - procStartServiceW = modadvapi32.NewProc("StartServiceW") - procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") - procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") - procControlService = modadvapi32.NewProc("ControlService") - procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") - procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") - procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") - procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") - procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") - procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") - procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procEqualSid = modadvapi32.NewProc("EqualSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") - procGetLastError = 
modkernel32.NewProc("GetLastError") - procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") - procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") - procFreeLibrary = modkernel32.NewProc("FreeLibrary") - procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") - procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") - procGetVersion = modkernel32.NewProc("GetVersion") - procFormatMessageW = modkernel32.NewProc("FormatMessageW") - procExitProcess = modkernel32.NewProc("ExitProcess") - procIsWow64Process = modkernel32.NewProc("IsWow64Process") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procReadFile = modkernel32.NewProc("ReadFile") - procWriteFile = modkernel32.NewProc("WriteFile") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetKernelObjectSecurity = modadvapi32.NewProc("SetKernelObjectSecurity") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") + procCertEnumCertificatesInStore = 
modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCloseHandle = modkernel32.NewProc("CloseHandle") - procGetStdHandle = modkernel32.NewProc("GetStdHandle") - procSetStdHandle = modkernel32.NewProc("SetStdHandle") - procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") - procFindNextFileW = modkernel32.NewProc("FindNextFileW") - procFindClose = modkernel32.NewProc("FindClose") - procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") - procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") - procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") - procDeleteFileW = modkernel32.NewProc("DeleteFileW") - procMoveFileW = modkernel32.NewProc("MoveFileW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") - procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") - procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") - procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") - procCancelIo = modkernel32.NewProc("CancelIo") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreatePipe = modkernel32.NewProc("CreatePipe") procCreateProcessW = modkernel32.NewProc("CreateProcessW") - procOpenProcess = 
modkernel32.NewProc("OpenProcess") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") - procTerminateProcess = modkernel32.NewProc("TerminateProcess") - procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") - procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") - procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") - procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") - procGetTempPathW = modkernel32.NewProc("GetTempPathW") - procCreatePipe = modkernel32.NewProc("CreatePipe") - procGetFileType = modkernel32.NewProc("GetFileType") - procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") - procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") - procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") - procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procFindClose = modkernel32.NewProc("FindClose") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetACP = modkernel32.NewProc("GetACP") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") - procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - 
procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetTickCount64 = modkernel32.NewProc("GetTickCount64") - procSetFileTime = modkernel32.NewProc("SetFileTime") - procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") - procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") - procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") - procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") - procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = modkernel32.NewProc("GetFileType") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLastError = modkernel32.NewProc("GetLastError") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") - procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") - procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") - procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") - procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") - procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") - procVirtualLock = modkernel32.NewProc("VirtualLock") - procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") - procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") - procVirtualFree = modkernel32.NewProc("VirtualFree") - procVirtualProtect = modkernel32.NewProc("VirtualProtect") - procTransmitFile = modmswsock.NewProc("TransmitFile") - procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") - procCertOpenStore = modcrypt32.NewProc("CertOpenStore") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") - procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") - procCertCloseStore = modcrypt32.NewProc("CertCloseStore") - procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") - procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") - procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") - procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") - procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") - procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") - procRegCloseKey = modadvapi32.NewProc("RegCloseKey") - procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") - procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") - procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") - procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") - procProcessIdToSessionId = 
modkernel32.NewProc("ProcessIdToSessionId") - procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") - procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") - procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") - procReadConsoleW = modkernel32.NewProc("ReadConsoleW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") - procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") - procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") - procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") - procCreateEventW = modkernel32.NewProc("CreateEventW") - procCreateEventExW = modkernel32.NewProc("CreateEventExW") - procOpenEventW = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") - procCreateMutexW = modkernel32.NewProc("CreateMutexW") - procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") - procOpenMutexW = modkernel32.NewProc("OpenMutexW") - procReleaseMutex = modkernel32.NewProc("ReleaseMutex") - procSleepEx = modkernel32.NewProc("SleepEx") - procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") procGetProcessId = modkernel32.NewProc("GetProcessId") - procOpenThread = modkernel32.NewProc("OpenThread") - procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") - procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") - procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") - procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") - procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") - procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") - procFindNextVolumeW = 
modkernel32.NewProc("FindNextVolumeW") - procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") - procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") - procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") - procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") - procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") + procGetVersion = modkernel32.NewProc("GetVersion") procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLocalFree = modkernel32.NewProc("LocalFree") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procOpenThread = modkernel32.NewProc("OpenThread") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") + procPulseEvent = modkernel32.NewProc("PulseEvent") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procReadConsoleW = 
modkernel32.NewProc("ReadConsoleW") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procReadFile = modkernel32.NewProc("ReadFile") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procResetEvent = modkernel32.NewProc("ResetEvent") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procSetEvent = modkernel32.NewProc("SetEvent") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") - procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") - procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") - procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procSleepEx = modkernel32.NewProc("SleepEx") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procWriteFile = modkernel32.NewProc("WriteFile") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserGetInfo = 
modnetapi32.NewProc("NetUserGetInfo") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procCoCreateGuid = modole32.NewProc("CoCreateGuid") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procRtlGetVersion = modntdll.NewProc("RtlGetVersion") - procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") - procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") - procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") - procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") - procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procEnumProcesses = modpsapi.NewProc("EnumProcesses") - procWSAStartup = modws2_32.NewProc("WSAStartup") - procWSACleanup = modws2_32.NewProc("WSACleanup") - procWSAIoctl = modws2_32.NewProc("WSAIoctl") - procsocket = modws2_32.NewProc("socket") - procsendto = modws2_32.NewProc("sendto") - procrecvfrom = modws2_32.NewProc("recvfrom") - procsetsockopt = modws2_32.NewProc("setsockopt") - procgetsockopt = modws2_32.NewProc("getsockopt") - procbind = modws2_32.NewProc("bind") - procconnect = modws2_32.NewProc("connect") - procgetsockname = modws2_32.NewProc("getsockname") - procgetpeername = modws2_32.NewProc("getpeername") - proclisten = modws2_32.NewProc("listen") - procshutdown = modws2_32.NewProc("shutdown") - procclosesocket = modws2_32.NewProc("closesocket") - procAcceptEx = modmswsock.NewProc("AcceptEx") - procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") - procWSARecv = modws2_32.NewProc("WSARecv") - procWSASend = modws2_32.NewProc("WSASend") - procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") - procWSASendTo = modws2_32.NewProc("WSASendTo") - procgethostbyname = modws2_32.NewProc("gethostbyname") - procgetservbyname = modws2_32.NewProc("getservbyname") - procntohs = modws2_32.NewProc("ntohs") - procgetprotobyname = modws2_32.NewProc("getprotobyname") - procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") - procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") - procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") - procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") - procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") - procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetACP = modkernel32.NewProc("GetACP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procTranslateNameW = modsecur32.NewProc("TranslateNameW") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") - procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") - procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") - procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - 
procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetLengthSid = modadvapi32.NewProc("GetLengthSid") - procCopySid = modadvapi32.NewProc("CopySid") - procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") - procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") - procFreeSid = modadvapi32.NewProc("FreeSid") - procEqualSid = modadvapi32.NewProc("EqualSid") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procIsValidSid = modadvapi32.NewProc("IsValidSid") - procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") - procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") - procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") - procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") - procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") - procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") - procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") - procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") - procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") - procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") - procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") - procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") - procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") - procGetSecurityDescriptorLength = 
modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") - procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") - procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") - procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") - procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") - procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") - procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") - procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") - procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASend = modws2_32.NewProc("WSASend") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procbind = modws2_32.NewProc("bind") + procclosesocket = modws2_32.NewProc("closesocket") + procconnect = modws2_32.NewProc("connect") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetpeername = modws2_32.NewProc("getpeername") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procgetsockname = modws2_32.NewProc("getsockname") + procgetsockopt = modws2_32.NewProc("getsockopt") + proclisten = modws2_32.NewProc("listen") + procntohs = modws2_32.NewProc("ntohs") + procrecvfrom = modws2_32.NewProc("recvfrom") + procsendto = modws2_32.NewProc("sendto") + procsetsockopt = modws2_32.NewProc("setsockopt") + procshutdown = modws2_32.NewProc("shutdown") + procsocket = modws2_32.NewProc("socket") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") ) -func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) } return } -func 
DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, 
srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + err = errnoErr(e1) } return } -func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return } - return + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) } -func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) +func 
CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), 
uintptr(unsafe.Pointer(groupName)), 0, 0) +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) - if r0 != 0 { - ret = syscall.Errno(r0) +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) - if r0 != 0 { - lasterr = syscall.Errno(r0) +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func LoadLibrary(libname string) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + if r1 == 0 { + err = errnoErr(e1) } - return _LoadLibrary(_p0) + return } -func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err 
!= nil { - return - } - return _LoadLibraryEx(_p0, zero, flags) +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return } -func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) } return } -func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) return } -func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(procname) - if err != nil { +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { return } - return _GetProcAddress(module, _p0) -} - -func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) - proc = uintptr(r0) - if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) } -func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 
{ - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) - ver = uint32(r0) - if ver == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 } - return -} - -func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { - var _p0 *uint16 - if len(buf) > 0 { - _p0 = &buf[0] + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) } return } -func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) - return -} - -func IsWow64Process(handle Handle, isWow64 *bool) (err error) { +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { var _p0 uint32 - if *isWow64 { + if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) - *isWow64 = _p0 != 0 + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) return } -func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), 
uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { var _p0 uint32 - if wait { + if *saclPresent { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) - newlowoffset = uint32(r0) - if newlowoffset == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } -func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidSubAuthority(sid *SID, index uint32) 
(subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) return } -func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) return } -func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isValidSecurityDescriptor(sd 
*SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 return } -func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 return } -func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 return } -func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, 
ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func 
OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) - rc = uint32(r0) - if rc == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) +func QueryServiceStatusEx(service Handle, 
infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), 
uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) } return } -func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + return +} + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) handle = Handle(r0) if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + err = errnoErr(e1) } return } -func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 <= 32 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) if r0 != 0 { ret = syscall.Errno(r0) } return } -func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) +func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { + r1, _, e1 := 
syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { var _p0 uint32 - if bInheritHandle { + if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), 
uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { + var _p0 uint32 + if ownerDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) } return } -func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + return +} + +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { var _p0 uint32 - if waitAll { + if saclPresent { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, 
uintptr(filehandle), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) - envs = (*uint16)(unsafe.Pointer(r0)) - if envs == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err 
= syscall.EINVAL - } +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) } return } -func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) +func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) } return } -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), 
uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - attrs = uint32(r0) - if attrs == INVALID_FILE_ATTRIBUTES { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) } return } -func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) } return } -func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) - cmd = (*uint16)(unsafe.Pointer(r0)) +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) return } -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) - if argv == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), 
uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) } return } -func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) - handle = Handle(r0) - if handle != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) } return } -func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) 
(handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) handle = Handle(r0) if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) - addr = uintptr(r0) - if addr == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) - 
if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) - value = uintptr(r0) - if value == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { +func CreateProcess(appName *uint16, commandLine *uint16, 
procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { var _p0 uint32 - if watchSubTree { + if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) - store = Handle(r0) - if store == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) handle = Handle(r0) if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + err = errnoErr(e1) } return } -func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 
{ - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return } -func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := 
syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, 
uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) - if r0 != 0 { - regerrno = syscall.Errno(r0) +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) - pid = uint32(r0) +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SetConsoleCursorPosition(console Handle, position Coord) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(*((*uint32)(unsafe.Pointer(&position)))), 0) +func 
FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) return } -func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) return } -func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, 
uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return -} - -func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +} + +func GetCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) return } @@ -2115,268 +1659,193 @@ func GetCurrentThreadId() (id uint32) { return } -func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), 
uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) return } -func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + err = errnoErr(e1) } return } -func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if initialOwner { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + err = errnoErr(e1) } return } -func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileInformationByHandle(handle Handle, 
data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 +func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) return } -func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) } return } -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = 
uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + err = errnoErr(e1) + } return } -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) } return } -func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) +func GetPriorityClass(process Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + ret = uint32(r0) if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } 
+ err = errnoErr(e1) } return } -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) } return } @@ -2385,44 +1854,31 @@ func GetProcessId(process Handle) (id uint32, err error) { r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) id = uint32(r0) if id == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetProcessPriorityBoost(process Handle, disable bool) (err error) { - var _p0 uint32 - if disable { - _p0 = 1 +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -2432,168 +1888,121 @@ func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintpt return } -func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), 
uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) handle = Handle(r0) if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) +func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = 
errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) return } -func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) +func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - driveType = uint32(r0) +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) return } -func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) - drivesBitMask = uint32(r0) - if drivesBitMask == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + err = errnoErr(e1) } return } -func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + err = errnoErr(e1) } return } @@ -2601,11 +2010,15 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + err = errnoErr(e1) } return } @@ -2613,11 +2026,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -2625,11 +2034,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), 
uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -2637,1411 +2042,1068 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) } return } -func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) +func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) { + err = procIsWow64Process2.Find() + if err != nil { + return + } + r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return } - return + return _LoadLibraryEx(_p0, zero, flags) } -func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _LoadLibraryEx(libname *uint16, 
zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { - var _p0 uint32 - if forceAppsClosed { - _p0 = 1 +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return } - var _p1 uint32 - if rebootAfterShutdown { - _p1 = 1 + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + err = errnoErr(e1) } return } -func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) } return } -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) } return } -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := 
syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + err = errnoErr(e1) } return } -func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } return } -func rtlGetVersion(info *OsVersionInfoEx) (ret error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } return } -func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) } return } -func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err 
error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) +func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) } return } -func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) - if r1 == socket_error { - if e1 != 0 
{ - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { var _p0 *byte if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) - 
if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + err = errnoErr(e1) } return } -func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func setConsoleCursorPosition(console Handle, position uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func 
AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) return } -func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func WSASendTo(s Handle, 
bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetHostByName(name string) (h *Hostent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetHostByName(_p0) -} - -func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - h = (*Hostent)(unsafe.Pointer(r0)) - if h == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + err = errnoErr(e1) } return } -func GetServByName(name string, proto string) (s *Servent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = syscall.BytePtrFromString(proto) - if err != nil { - return - } - return _GetServByName(_p0, _p1) -} - -func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) - s = (*Servent)(unsafe.Pointer(r0)) - if s == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) - u = uint16(r0) +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetProtoByName(name string) (p *Protoent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) 
+ if ret == 0 { + err = errnoErr(e1) } - return _GetProtoByName(_p0) + return } -func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - p = (*Protoent)(unsafe.Pointer(r0)) - if p == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - var _p0 *uint16 - _p0, status = syscall.UTF16PtrFromString(name) - if status != nil { - return +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 } - return _DnsQuery(_p0, qtype, options, extra, qrs, pr) -} - -func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) - if r0 != 0 { - status = syscall.Errno(r0) + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) - return -} - -func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) - same = r0 != 0 - return -} - -func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) +func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, 
uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) return } -func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) - acp = uint32(r0) +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) - nwrite = int32(r0) - if nwrite == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), 
uintptr(unsafe.Pointer(nSize)), 0) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } - -func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) - if r0 != 0 { - neterr = syscall.Errno(r0) + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + err = errnoErr(e1) } return } -func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + err = errnoErr(e1) } return } -func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + var _p0 uint32 + if waitAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) } return } -func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - len = uint32(r0) +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } return } -func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) +func WriteFile(handle 
Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) - isWellKnown = r0 != 0 +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) return } -func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) - isEqual = r0 != 0 +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } 
return } -func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - count = (*uint8)(unsafe.Pointer(r0)) +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } return } -func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) - subAuthority = (*uint32)(unsafe.Pointer(r0)) +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } -func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - isValid = r0 != 0 +func rtlGetVersion(info *OsVersionInfoEx) (ret error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } return } -func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) return } -func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) return } -func RevertToSelf() (err 
error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) +func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { + var _p0 *uint32 + if len(processIds) > 0 { + _p0 = &processIds[0] + } + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + err = errnoErr(e1) } return } -func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { - var _p0 uint32 - if disableAllPrivileges { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + err = errnoErr(e1) } return } -func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { - var _p0 uint32 - if resetToDefault { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) } return } -func GetTokenInformation(token Token, 
infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 <= 32 { + err = errnoErr(e1) } return } -func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) } return } -func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + err = errnoErr(e1) } return } -func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) return } -func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) } return } -func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) + } return } -func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation 
SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { - syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } return } -func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) } - return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) + return } -func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) } - return _SetNamedSecurityInfo(_p0, objectType, 
securityInformation, owner, group, dacl, sacl) + return } -func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { - ret = syscall.Errno(r0) + sockerr = syscall.Errno(r0) } return } -func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) - if r0 != 0 { - ret = syscall.Errno(r0) +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { - var _p0 uint32 - if *daclPresent { - _p0 = 1 - } - var _p1 uint32 - if *daclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *daclPresent = _p0 != 0 - *daclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err 
= syscall.BytePtrFromString(name) + if err != nil { + return } - return + return _GetHostByName(_p0) } -func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { - var _p0 uint32 - if *saclPresent { - _p0 = 1 - } - var _p1 uint32 - if *saclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *saclPresent = _p0 != 0 - *saclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + err = errnoErr(e1) } return } -func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { - var _p0 uint32 - if *ownerDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) - *ownerDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) } return } -func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { - var _p0 uint32 - if *groupDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) - *groupDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + return _GetProtoByName(_p0) } -func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - len = uint32(r0) +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + err = errnoErr(e1) + } return } -func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return } - return + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) } -func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - isValid = r0 != 0 +func _GetServByName(name *byte, proto *byte) (s *Servent, 
err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + err = errnoErr(e1) + } return } -func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { - var _p0 uint32 - if daclPresent { - _p0 = 1 - } - var _p1 uint32 - if daclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { - var _p0 uint32 - if saclPresent { - _p0 = 1 - } - var _p1 uint32 - if saclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { - var _p0 uint32 - if ownerDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) return } -func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { - var _p0 uint32 - if groupDefaulted { - _p0 = 1 +func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 
6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) } return } -func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) +func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + if r1 == socket_error { + err = errnoErr(e1) + } return } -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + err = errnoErr(e1) } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) + return } -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + err = errnoErr(e1) } return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) } return } -func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) +func 
WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } -func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) return } -func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 7ffa365121cbf..647f2d4279e6d 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go new file mode 100644 index 0000000000000..c937d0976febb --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -0,0 +1,1955 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.16 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 17408 bytes (17.00 KiB). Checksum: df85fcbfe9b8377f. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 248 blocks, 15872 entries, 15872 bytes +// The third block is the zero block. +var bidiValues = [15872]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 
0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 
0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 
0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 
0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 
0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 
0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x895: 0x000c, 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa81: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaca: 0x000c, + 0xad2: 0x000c, 0xad3: 0x000c, 0xad4: 0x000c, 0xad6: 0x000c, + // Block 0x2c, offset 0xb00 + 0xb31: 0x000c, 0xb34: 0x000c, 0xb35: 0x000c, + 0xb36: 0x000c, 0xb37: 0x000c, 0xb38: 0x000c, 0xb39: 0x000c, 0xb3a: 0x000c, + 0xb3f: 0x0004, + // Block 0x2d, offset 0xb40 + 0xb47: 0x000c, 0xb48: 0x000c, 0xb49: 0x000c, 0xb4a: 0x000c, 0xb4b: 0x000c, + 0xb4c: 0x000c, 0xb4d: 0x000c, 0xb4e: 0x000c, + // Block 0x2e, offset 0xb80 + 0xbb1: 0x000c, 0xbb4: 0x000c, 0xbb5: 0x000c, + 0xbb6: 0x000c, 0xbb7: 0x000c, 0xbb8: 0x000c, 0xbb9: 0x000c, 0xbba: 0x000c, 0xbbb: 0x000c, + 0xbbc: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbc8: 0x000c, 0xbc9: 0x000c, 0xbca: 0x000c, 0xbcb: 0x000c, + 0xbcc: 0x000c, 0xbcd: 0x000c, + // Block 0x30, offset 0xc00 + 0xc18: 0x000c, 0xc19: 0x000c, + 0xc35: 0x000c, + 0xc37: 0x000c, 0xc39: 0x000c, 0xc3a: 0x003a, 0xc3b: 0x002a, + 0xc3c: 0x003a, 0xc3d: 0x002a, + // Block 0x31, offset 0xc40 + 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, 0xc7d: 0x000c, 0xc7e: 0x000c, + // Block 0x32, offset 0xc80 + 0xc80: 0x000c, 0xc81: 0x000c, 0xc82: 0x000c, 0xc83: 0x000c, 0xc84: 0x000c, + 0xc86: 0x000c, 0xc87: 0x000c, + 0xc8d: 0x000c, 0xc8e: 0x000c, 0xc8f: 0x000c, 0xc90: 0x000c, 0xc91: 0x000c, + 0xc92: 0x000c, 0xc93: 0x000c, 0xc94: 0x000c, 0xc95: 0x000c, 0xc96: 0x000c, 0xc97: 0x000c, + 0xc99: 0x000c, 0xc9a: 0x000c, 0xc9b: 0x000c, 0xc9c: 0x000c, 0xc9d: 0x000c, + 0xc9e: 0x000c, 0xc9f: 0x000c, 0xca0: 0x000c, 0xca1: 0x000c, 0xca2: 0x000c, 0xca3: 0x000c, + 0xca4: 0x000c, 0xca5: 0x000c, 0xca6: 0x000c, 0xca7: 0x000c, 0xca8: 0x000c, 0xca9: 0x000c, + 0xcaa: 0x000c, 0xcab: 0x000c, 0xcac: 0x000c, 0xcad: 0x000c, 0xcae: 0x000c, 0xcaf: 0x000c, + 0xcb0: 0x000c, 0xcb1: 0x000c, 0xcb2: 0x000c, 0xcb3: 0x000c, 0xcb4: 0x000c, 0xcb5: 0x000c, + 0xcb6: 0x000c, 0xcb7: 0x000c, 0xcb8: 0x000c, 0xcb9: 0x000c, 0xcba: 0x000c, 0xcbb: 0x000c, + 0xcbc: 0x000c, + // Block 0x33, offset 0xcc0 + 0xcc6: 0x000c, + // Block 0x34, offset 0xd00 + 0xd2d: 0x000c, 0xd2e: 0x000c, 0xd2f: 0x000c, + 0xd30: 0x000c, 0xd32: 0x000c, 
0xd33: 0x000c, 0xd34: 0x000c, 0xd35: 0x000c, + 0xd36: 0x000c, 0xd37: 0x000c, 0xd39: 0x000c, 0xd3a: 0x000c, + 0xd3d: 0x000c, 0xd3e: 0x000c, + // Block 0x35, offset 0xd40 + 0xd58: 0x000c, 0xd59: 0x000c, + 0xd5e: 0x000c, 0xd5f: 0x000c, 0xd60: 0x000c, + 0xd71: 0x000c, 0xd72: 0x000c, 0xd73: 0x000c, 0xd74: 0x000c, + // Block 0x36, offset 0xd80 + 0xd82: 0x000c, 0xd85: 0x000c, + 0xd86: 0x000c, + 0xd8d: 0x000c, + 0xd9d: 0x000c, + // Block 0x37, offset 0xdc0 + 0xddd: 0x000c, + 0xdde: 0x000c, 0xddf: 0x000c, + // Block 0x38, offset 0xe00 + 0xe10: 0x000a, 0xe11: 0x000a, + 0xe12: 0x000a, 0xe13: 0x000a, 0xe14: 0x000a, 0xe15: 0x000a, 0xe16: 0x000a, 0xe17: 0x000a, + 0xe18: 0x000a, 0xe19: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x0009, + 0xe9b: 0x007a, 0xe9c: 0x006a, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, 0xed4: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, 0xef4: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf12: 0x000c, 0xf13: 0x000c, + 0xf32: 0x000c, 0xf33: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf74: 0x000c, 0xf75: 0x000c, + 0xf77: 0x000c, 0xf78: 0x000c, 0xf79: 0x000c, 0xf7a: 0x000c, 0xf7b: 0x000c, + 0xf7c: 0x000c, 0xf7d: 0x000c, + // Block 0x3e, offset 0xf80 + 0xf86: 0x000c, 0xf89: 0x000c, 0xf8a: 0x000c, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000c, 0xf8f: 0x000c, 0xf90: 0x000c, 0xf91: 0x000c, + 0xf92: 0x000c, 0xf93: 0x000c, + 0xf9b: 0x0004, 0xf9d: 0x000c, + 0xfb0: 0x000a, 0xfb1: 0x000a, 0xfb2: 0x000a, 0xfb3: 0x000a, 0xfb4: 0x000a, 0xfb5: 0x000a, + 0xfb6: 0x000a, 0xfb7: 0x000a, 0xfb8: 0x000a, 0xfb9: 0x000a, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x000a, 0xfc1: 0x000a, 0xfc2: 0x000a, 0xfc3: 0x000a, 0xfc4: 0x000a, 0xfc5: 0x000a, + 0xfc6: 0x000a, 0xfc7: 0x000a, 0xfc8: 0x000a, 0xfc9: 0x000a, 0xfca: 0x000a, 0xfcb: 0x000c, + 0xfcc: 0x000c, 0xfcd: 0x000c, 0xfce: 0x000b, + // Block 0x40, offset 0x1000 + 0x1005: 0x000c, + 0x1006: 0x000c, + 0x1029: 0x000c, + // Block 0x41, offset 0x1040 + 0x1060: 0x000c, 0x1061: 0x000c, 0x1062: 0x000c, + 0x1067: 0x000c, 0x1068: 0x000c, + 0x1072: 0x000c, + 0x1079: 0x000c, 0x107a: 0x000c, 0x107b: 0x000c, + // Block 0x42, offset 0x1080 + 0x1080: 0x000a, 0x1084: 0x000a, 0x1085: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10de: 0x000a, 0x10df: 0x000a, 0x10e0: 0x000a, 0x10e1: 0x000a, 0x10e2: 0x000a, 0x10e3: 0x000a, + 0x10e4: 0x000a, 0x10e5: 0x000a, 0x10e6: 0x000a, 0x10e7: 0x000a, 0x10e8: 0x000a, 0x10e9: 0x000a, + 0x10ea: 0x000a, 0x10eb: 0x000a, 0x10ec: 0x000a, 0x10ed: 0x000a, 0x10ee: 0x000a, 0x10ef: 0x000a, + 0x10f0: 0x000a, 0x10f1: 0x000a, 0x10f2: 0x000a, 0x10f3: 0x000a, 0x10f4: 0x000a, 0x10f5: 0x000a, + 0x10f6: 0x000a, 0x10f7: 0x000a, 0x10f8: 0x000a, 0x10f9: 0x000a, 0x10fa: 0x000a, 0x10fb: 0x000a, + 0x10fc: 0x000a, 0x10fd: 0x000a, 0x10fe: 0x000a, 0x10ff: 0x000a, + // Block 0x44, offset 0x1100 + 0x1117: 0x000c, + 0x1118: 0x000c, 0x111b: 0x000c, + // Block 0x45, offset 0x1140 + 0x1156: 0x000c, + 0x1158: 0x000c, 0x1159: 0x000c, 0x115a: 0x000c, 0x115b: 0x000c, 0x115c: 0x000c, 0x115d: 0x000c, + 0x115e: 0x000c, 0x1160: 0x000c, 0x1162: 0x000c, + 0x1165: 0x000c, 0x1166: 0x000c, 0x1167: 0x000c, 0x1168: 0x000c, 0x1169: 0x000c, + 0x116a: 0x000c, 0x116b: 0x000c, 0x116c: 0x000c, + 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117f: 0x000c, + // Block 0x46, offset 0x1180 + 0x11b0: 0x000c, 0x11b1: 0x000c, 0x11b2: 0x000c, 0x11b3: 0x000c, 0x11b4: 0x000c, 0x11b5: 0x000c, + 0x11b6: 0x000c, 0x11b7: 
0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bb: 0x000c, + 0x11bc: 0x000c, 0x11bd: 0x000c, 0x11be: 0x000c, 0x11bf: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, 0x1202: 0x000c, 0x1203: 0x000c, + 0x1234: 0x000c, + 0x1236: 0x000c, 0x1237: 0x000c, 0x1238: 0x000c, 0x1239: 0x000c, 0x123a: 0x000c, + 0x123c: 0x000c, + // Block 0x49, offset 0x1240 + 0x1242: 0x000c, + 0x126b: 0x000c, 0x126c: 0x000c, 0x126d: 0x000c, 0x126e: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, 0x1272: 0x000c, 0x1273: 0x000c, + // Block 0x4a, offset 0x1280 + 0x1280: 0x000c, 0x1281: 0x000c, + 0x12a2: 0x000c, 0x12a3: 0x000c, + 0x12a4: 0x000c, 0x12a5: 0x000c, 0x12a8: 0x000c, 0x12a9: 0x000c, + 0x12ab: 0x000c, 0x12ac: 0x000c, 0x12ad: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12e6: 0x000c, 0x12e8: 0x000c, 0x12e9: 0x000c, + 0x12ed: 0x000c, 0x12ef: 0x000c, + 0x12f0: 0x000c, 0x12f1: 0x000c, + // Block 0x4c, offset 0x1300 + 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, + // Block 0x4d, offset 0x1340 + 0x1350: 0x000c, 0x1351: 0x000c, + 0x1352: 0x000c, 0x1354: 0x000c, 0x1355: 0x000c, 0x1356: 0x000c, 0x1357: 0x000c, + 0x1358: 0x000c, 0x1359: 0x000c, 0x135a: 0x000c, 0x135b: 0x000c, 0x135c: 0x000c, 0x135d: 0x000c, + 0x135e: 0x000c, 0x135f: 0x000c, 0x1360: 0x000c, 0x1362: 0x000c, 0x1363: 0x000c, + 0x1364: 0x000c, 0x1365: 0x000c, 0x1366: 0x000c, 0x1367: 0x000c, 0x1368: 0x000c, + 0x136d: 0x000c, + 0x1374: 0x000c, + 0x1378: 0x000c, 0x1379: 0x000c, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000c, 0x1381: 0x000c, 0x1382: 0x000c, 0x1383: 0x000c, 0x1384: 0x000c, 0x1385: 0x000c, + 0x1386: 0x000c, 0x1387: 0x000c, 0x1388: 0x000c, 0x1389: 0x000c, 0x138a: 0x000c, 0x138b: 0x000c, + 0x138c: 0x000c, 0x138d: 0x000c, 0x138e: 0x000c, 0x138f: 0x000c, 0x1390: 0x000c, 0x1391: 0x000c, + 0x1392: 0x000c, 0x1393: 0x000c, 0x1394: 0x000c, 0x1395: 0x000c, 0x1396: 0x000c, 0x1397: 0x000c, + 0x1398: 0x000c, 0x1399: 0x000c, 0x139a: 0x000c, 0x139b: 0x000c, 0x139c: 0x000c, 0x139d: 0x000c, + 0x139e: 0x000c, 0x139f: 0x000c, 0x13a0: 0x000c, 0x13a1: 0x000c, 0x13a2: 0x000c, 0x13a3: 0x000c, + 0x13a4: 0x000c, 0x13a5: 0x000c, 0x13a6: 0x000c, 0x13a7: 0x000c, 0x13a8: 0x000c, 0x13a9: 0x000c, + 0x13aa: 0x000c, 0x13ab: 0x000c, 0x13ac: 0x000c, 0x13ad: 0x000c, 0x13ae: 0x000c, 0x13af: 0x000c, + 0x13b0: 0x000c, 0x13b1: 0x000c, 0x13b2: 0x000c, 0x13b3: 0x000c, 0x13b4: 0x000c, 0x13b5: 0x000c, + 0x13b6: 0x000c, 0x13b7: 0x000c, 0x13b8: 0x000c, 0x13b9: 0x000c, 0x13bb: 0x000c, + 0x13bc: 0x000c, 0x13bd: 0x000c, 0x13be: 0x000c, 0x13bf: 0x000c, + // Block 0x4f, offset 0x13c0 + 0x13fd: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, + 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, + 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x000a, + 0x142d: 0x000a, 0x142e: 0x000a, 0x142f: 0x000a, + 0x143d: 0x000a, 0x143e: 0x000a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0009, 0x1441: 0x0009, 0x1442: 0x0009, 0x1443: 0x0009, 0x1444: 0x0009, 0x1445: 0x0009, + 0x1446: 0x0009, 0x1447: 0x0009, 0x1448: 0x0009, 0x1449: 0x0009, 0x144a: 0x0009, 0x144b: 0x000b, + 0x144c: 0x000b, 0x144d: 0x000b, 0x144f: 0x0001, 0x1450: 0x000a, 0x1451: 0x000a, + 0x1452: 0x000a, 0x1453: 0x000a, 0x1454: 0x000a, 0x1455: 0x000a, 0x1456: 0x000a, 0x1457: 0x000a, + 0x1458: 0x000a, 0x1459: 0x000a, 0x145a: 0x000a, 0x145b: 0x000a, 0x145c: 0x000a, 0x145d: 0x000a, + 0x145e: 0x000a, 0x145f: 0x000a, 
0x1460: 0x000a, 0x1461: 0x000a, 0x1462: 0x000a, 0x1463: 0x000a, + 0x1464: 0x000a, 0x1465: 0x000a, 0x1466: 0x000a, 0x1467: 0x000a, 0x1468: 0x0009, 0x1469: 0x0007, + 0x146a: 0x000e, 0x146b: 0x000e, 0x146c: 0x000e, 0x146d: 0x000e, 0x146e: 0x000e, 0x146f: 0x0006, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x000a, + 0x1476: 0x000a, 0x1477: 0x000a, 0x1478: 0x000a, 0x1479: 0x000a, 0x147a: 0x000a, 0x147b: 0x000a, + 0x147c: 0x000a, 0x147d: 0x000a, 0x147e: 0x000a, 0x147f: 0x000a, + // Block 0x52, offset 0x1480 + 0x1480: 0x000a, 0x1481: 0x000a, 0x1482: 0x000a, 0x1483: 0x000a, 0x1484: 0x0006, 0x1485: 0x009a, + 0x1486: 0x008a, 0x1487: 0x000a, 0x1488: 0x000a, 0x1489: 0x000a, 0x148a: 0x000a, 0x148b: 0x000a, + 0x148c: 0x000a, 0x148d: 0x000a, 0x148e: 0x000a, 0x148f: 0x000a, 0x1490: 0x000a, 0x1491: 0x000a, + 0x1492: 0x000a, 0x1493: 0x000a, 0x1494: 0x000a, 0x1495: 0x000a, 0x1496: 0x000a, 0x1497: 0x000a, + 0x1498: 0x000a, 0x1499: 0x000a, 0x149a: 0x000a, 0x149b: 0x000a, 0x149c: 0x000a, 0x149d: 0x000a, + 0x149e: 0x000a, 0x149f: 0x0009, 0x14a0: 0x000b, 0x14a1: 0x000b, 0x14a2: 0x000b, 0x14a3: 0x000b, + 0x14a4: 0x000b, 0x14a5: 0x000b, 0x14a6: 0x000e, 0x14a7: 0x000e, 0x14a8: 0x000e, 0x14a9: 0x000e, + 0x14aa: 0x000b, 0x14ab: 0x000b, 0x14ac: 0x000b, 0x14ad: 0x000b, 0x14ae: 0x000b, 0x14af: 0x000b, + 0x14b0: 0x0002, 0x14b4: 0x0002, 0x14b5: 0x0002, + 0x14b6: 0x0002, 0x14b7: 0x0002, 0x14b8: 0x0002, 0x14b9: 0x0002, 0x14ba: 0x0003, 0x14bb: 0x0003, + 0x14bc: 0x000a, 0x14bd: 0x009a, 0x14be: 0x008a, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0002, 0x14c1: 0x0002, 0x14c2: 0x0002, 0x14c3: 0x0002, 0x14c4: 0x0002, 0x14c5: 0x0002, + 0x14c6: 0x0002, 0x14c7: 0x0002, 0x14c8: 0x0002, 0x14c9: 0x0002, 0x14ca: 0x0003, 0x14cb: 0x0003, + 0x14cc: 0x000a, 0x14cd: 0x009a, 0x14ce: 0x008a, + 0x14e0: 0x0004, 0x14e1: 0x0004, 0x14e2: 0x0004, 0x14e3: 0x0004, + 0x14e4: 0x0004, 0x14e5: 0x0004, 0x14e6: 0x0004, 0x14e7: 0x0004, 0x14e8: 0x0004, 0x14e9: 0x0004, + 0x14ea: 0x0004, 0x14eb: 0x0004, 0x14ec: 0x0004, 0x14ed: 0x0004, 0x14ee: 0x0004, 0x14ef: 0x0004, + 0x14f0: 0x0004, 0x14f1: 0x0004, 0x14f2: 0x0004, 0x14f3: 0x0004, 0x14f4: 0x0004, 0x14f5: 0x0004, + 0x14f6: 0x0004, 0x14f7: 0x0004, 0x14f8: 0x0004, 0x14f9: 0x0004, 0x14fa: 0x0004, 0x14fb: 0x0004, + 0x14fc: 0x0004, 0x14fd: 0x0004, 0x14fe: 0x0004, 0x14ff: 0x0004, + // Block 0x54, offset 0x1500 + 0x1500: 0x0004, 0x1501: 0x0004, 0x1502: 0x0004, 0x1503: 0x0004, 0x1504: 0x0004, 0x1505: 0x0004, + 0x1506: 0x0004, 0x1507: 0x0004, 0x1508: 0x0004, 0x1509: 0x0004, 0x150a: 0x0004, 0x150b: 0x0004, + 0x150c: 0x0004, 0x150d: 0x0004, 0x150e: 0x0004, 0x150f: 0x0004, 0x1510: 0x000c, 0x1511: 0x000c, + 0x1512: 0x000c, 0x1513: 0x000c, 0x1514: 0x000c, 0x1515: 0x000c, 0x1516: 0x000c, 0x1517: 0x000c, + 0x1518: 0x000c, 0x1519: 0x000c, 0x151a: 0x000c, 0x151b: 0x000c, 0x151c: 0x000c, 0x151d: 0x000c, + 0x151e: 0x000c, 0x151f: 0x000c, 0x1520: 0x000c, 0x1521: 0x000c, 0x1522: 0x000c, 0x1523: 0x000c, + 0x1524: 0x000c, 0x1525: 0x000c, 0x1526: 0x000c, 0x1527: 0x000c, 0x1528: 0x000c, 0x1529: 0x000c, + 0x152a: 0x000c, 0x152b: 0x000c, 0x152c: 0x000c, 0x152d: 0x000c, 0x152e: 0x000c, 0x152f: 0x000c, + 0x1530: 0x000c, + // Block 0x55, offset 0x1540 + 0x1540: 0x000a, 0x1541: 0x000a, 0x1543: 0x000a, 0x1544: 0x000a, 0x1545: 0x000a, + 0x1546: 0x000a, 0x1548: 0x000a, 0x1549: 0x000a, + 0x1554: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1565: 0x000a, 0x1567: 0x000a, 0x1569: 0x000a, + 
0x156e: 0x0004, + 0x157a: 0x000a, 0x157b: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, + 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x000a, 0x15d3: 0x000a, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x000a, 0x1609: 0x000a, 0x160a: 0x000a, 0x160b: 0x000a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x000a, + 0x162a: 0x000a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + 0x1636: 0x000a, 0x1637: 0x000a, 0x1638: 0x000a, 0x1639: 0x000a, 0x163a: 0x000a, 0x163b: 0x000a, + 0x163c: 0x000a, 0x163d: 0x000a, 0x163e: 0x000a, 0x163f: 0x000a, + // Block 0x59, offset 0x1640 + 0x1640: 0x000a, 0x1641: 0x000a, 0x1642: 0x000a, 0x1643: 0x000a, 0x1644: 0x000a, 0x1645: 0x000a, + 0x1646: 0x000a, 0x1647: 0x000a, 0x1648: 0x000a, 0x1649: 0x000a, 0x164a: 0x000a, 0x164b: 0x000a, + 0x164c: 0x000a, 0x164d: 0x000a, 0x164e: 0x000a, 0x164f: 0x000a, 0x1650: 0x000a, 0x1651: 0x000a, + 0x1652: 0x0003, 0x1653: 0x0004, 0x1654: 0x000a, 0x1655: 0x000a, 0x1656: 0x000a, 0x1657: 0x000a, + 0x1658: 0x000a, 0x1659: 0x000a, 0x165a: 0x000a, 0x165b: 0x000a, 0x165c: 0x000a, 0x165d: 0x000a, + 0x165e: 0x000a, 0x165f: 0x000a, 0x1660: 0x000a, 0x1661: 0x000a, 0x1662: 0x000a, 0x1663: 0x000a, + 0x1664: 0x000a, 0x1665: 0x000a, 0x1666: 0x000a, 0x1667: 0x000a, 0x1668: 0x000a, 0x1669: 0x000a, + 0x166a: 0x000a, 0x166b: 0x000a, 0x166c: 0x000a, 0x166d: 0x000a, 0x166e: 0x000a, 0x166f: 0x000a, + 0x1670: 0x000a, 0x1671: 0x000a, 0x1672: 0x000a, 0x1673: 0x000a, 0x1674: 0x000a, 0x1675: 0x000a, + 0x1676: 0x000a, 0x1677: 0x000a, 0x1678: 0x000a, 0x1679: 0x000a, 0x167a: 0x000a, 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 
0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x003a, 0x1689: 0x002a, 0x168a: 0x003a, 0x168b: 0x002a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1695: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x009a, + 0x16aa: 0x008a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, 0x16ff: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, 0x174b: 0x000a, + 0x174c: 0x000a, 0x174d: 0x000a, 0x174e: 0x000a, 0x174f: 0x000a, 0x1750: 0x000a, 0x1751: 0x000a, + 0x1752: 0x000a, 0x1753: 0x000a, 0x1754: 0x000a, 0x1755: 0x000a, 0x1756: 0x000a, 0x1757: 0x000a, + 0x1758: 0x000a, 0x1759: 0x000a, 0x175a: 0x000a, 0x175b: 0x000a, 0x175c: 0x000a, 0x175d: 0x000a, + 0x175e: 0x000a, 0x175f: 0x000a, 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x000a, 0x1789: 0x000a, 0x178a: 0x000a, + 0x17a0: 0x000a, 0x17a1: 0x000a, 0x17a2: 0x000a, 0x17a3: 0x000a, + 0x17a4: 0x000a, 0x17a5: 0x000a, 0x17a6: 0x000a, 0x17a7: 0x000a, 0x17a8: 0x000a, 0x17a9: 0x000a, + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 
0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x0002, 0x17c9: 0x0002, 0x17ca: 0x0002, 0x17cb: 0x0002, + 0x17cc: 0x0002, 0x17cd: 0x0002, 0x17ce: 0x0002, 0x17cf: 0x0002, 0x17d0: 0x0002, 0x17d1: 0x0002, + 0x17d2: 0x0002, 0x17d3: 0x0002, 0x17d4: 0x0002, 0x17d5: 0x0002, 0x17d6: 0x0002, 0x17d7: 0x0002, + 0x17d8: 0x0002, 0x17d9: 0x0002, 0x17da: 0x0002, 0x17db: 0x0002, + // Block 0x60, offset 0x1800 + 0x182a: 0x000a, 0x182b: 0x000a, 0x182c: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x000a, 0x1869: 0x000a, + 0x186a: 0x000a, 0x186b: 0x000a, 0x186d: 0x000a, 0x186e: 0x000a, 0x186f: 0x000a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x000a, + 0x1886: 0x000a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x003a, 0x18b1: 0x002a, 0x18b2: 0x003a, 0x18b3: 0x002a, 0x18b4: 0x003a, 0x18b5: 0x002a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x000a, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 
0x000a, 0x18e5: 0x000a, 0x18e6: 0x003a, 0x18e7: 0x002a, 0x18e8: 0x003a, 0x18e9: 0x002a, + 0x18ea: 0x003a, 0x18eb: 0x002a, 0x18ec: 0x003a, 0x18ed: 0x002a, 0x18ee: 0x003a, 0x18ef: 0x002a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x007a, 0x1904: 0x006a, 0x1905: 0x009a, + 0x1906: 0x008a, 0x1907: 0x00ba, 0x1908: 0x00aa, 0x1909: 0x009a, 0x190a: 0x008a, 0x190b: 0x007a, + 0x190c: 0x006a, 0x190d: 0x00da, 0x190e: 0x002a, 0x190f: 0x003a, 0x1910: 0x00ca, 0x1911: 0x009a, + 0x1912: 0x008a, 0x1913: 0x007a, 0x1914: 0x006a, 0x1915: 0x009a, 0x1916: 0x008a, 0x1917: 0x00ba, + 0x1918: 0x00aa, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x003a, 0x1959: 0x002a, 0x195a: 0x003a, 0x195b: 0x002a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x003a, 0x197d: 0x002a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 
0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, + 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19c9: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, + 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, + 0x19d2: 0x000a, 0x19d3: 0x000a, 0x19d4: 0x000a, 0x19d5: 0x000a, 0x19d7: 0x000a, + 0x19d8: 0x000a, 0x19d9: 0x000a, 0x19da: 0x000a, 0x19db: 0x000a, 0x19dc: 0x000a, 0x19dd: 0x000a, + 0x19de: 0x000a, 0x19df: 0x000a, 0x19e0: 0x000a, 0x19e1: 0x000a, 0x19e2: 0x000a, 0x19e3: 0x000a, + 0x19e4: 0x000a, 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19eb: 0x000a, 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, + 0x19f0: 0x000a, 0x19f1: 0x000a, 0x19f2: 0x000a, 0x19f3: 0x000a, 0x19f4: 0x000a, 0x19f5: 0x000a, + 0x19f6: 0x000a, 0x19f7: 0x000a, 0x19f8: 0x000a, 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, + 0x1a2a: 0x000a, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, + 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, + 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, + // Block 0x69, offset 0x1a40 + 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, + 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, + 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, + 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, + 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, + 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, + 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 
0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + 0x1b7c: 0x000a, 0x1b7d: 0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, + 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, + 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, + 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, + 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, + 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, + 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, + 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, + 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, + 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, + 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, + 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, + 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, + 0x1c30: 0x000a, + 0x1c36: 0x000a, 0x1c37: 0x000a, + 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, + 0x1c60: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1cbb: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, + 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, + 0x1ccc: 
0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, + 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d1d: 0x000a, + 0x1d1e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d50: 0x000a, 0x1d51: 0x000a, + 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, + 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, + 0x1d5e: 0x000a, 0x1d5f: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, + 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, + 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e5e: 0x000a, 0x1e5f: 0x000a, + 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e90: 0x000a, 0x1e91: 0x000a, + 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, + 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, + 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, + 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, + 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, + 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, + 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, + 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, + 0x1ec6: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f6f: 0x000c, + 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, + 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, + 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, + // Block 0x7e, offset 0x1f80 + 0x1f9e: 0x000c, 0x1f9f: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1ff0: 0x000c, 0x1ff1: 0x000c, + // Block 0x80, offset 0x2000 + 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, + 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, + 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, + 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, + 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, + 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, + // Block 0x81, offset 0x2040 + 0x2048: 0x000a, + // Block 0x82, offset 0x2080 + 0x2082: 0x000c, + 0x2086: 0x000c, 0x208b: 0x000c, + 
0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, + 0x20aa: 0x000a, 0x20ab: 0x000a, 0x20ac: 0x000c, + 0x20b8: 0x0004, 0x20b9: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20f4: 0x000a, 0x20f5: 0x000a, + 0x20f6: 0x000a, 0x20f7: 0x000a, + // Block 0x84, offset 0x2100 + 0x2104: 0x000c, 0x2105: 0x000c, + 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, + 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, + 0x2130: 0x000c, 0x2131: 0x000c, + 0x213f: 0x000c, + // Block 0x85, offset 0x2140 + 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, + 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, + // Block 0x86, offset 0x2180 + 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, + 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, + 0x21f3: 0x000c, + 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, + 0x21fc: 0x000c, 0x21fd: 0x000c, + // Block 0x88, offset 0x2200 + 0x2225: 0x000c, + // Block 0x89, offset 0x2240 + 0x2269: 0x000c, + 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, + 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, + 0x2276: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2283: 0x000c, + 0x228c: 0x000c, + 0x22bc: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, + 0x22f7: 0x000c, 0x22f8: 0x000c, + 0x22fe: 0x000c, 0x22ff: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2301: 0x000c, + 0x232c: 0x000c, 0x232d: 0x000c, + 0x2336: 0x000c, + // Block 0x8d, offset 0x2340 + 0x236a: 0x000a, 0x236b: 0x000a, + // Block 0x8e, offset 0x2380 + 0x23a5: 0x000c, 0x23a8: 0x000c, + 0x23ad: 0x000c, + // Block 0x8f, offset 0x23c0 + 0x23dd: 0x0001, + 0x23de: 0x000c, 0x23df: 0x0001, 0x23e0: 0x0001, 0x23e1: 0x0001, 0x23e2: 0x0001, 0x23e3: 0x0001, + 0x23e4: 0x0001, 0x23e5: 0x0001, 0x23e6: 0x0001, 0x23e7: 0x0001, 0x23e8: 0x0001, 0x23e9: 0x0003, + 0x23ea: 0x0001, 0x23eb: 0x0001, 0x23ec: 0x0001, 0x23ed: 0x0001, 0x23ee: 0x0001, 0x23ef: 0x0001, + 0x23f0: 0x0001, 0x23f1: 0x0001, 0x23f2: 0x0001, 0x23f3: 0x0001, 0x23f4: 0x0001, 0x23f5: 0x0001, + 0x23f6: 0x0001, 0x23f7: 0x0001, 0x23f8: 0x0001, 0x23f9: 0x0001, 0x23fa: 0x0001, 0x23fb: 0x0001, + 0x23fc: 0x0001, 0x23fd: 0x0001, 0x23fe: 0x0001, 0x23ff: 0x0001, + // Block 0x90, offset 0x2400 + 0x2400: 0x0001, 0x2401: 0x0001, 0x2402: 0x0001, 0x2403: 0x0001, 0x2404: 0x0001, 0x2405: 0x0001, + 0x2406: 0x0001, 0x2407: 0x0001, 0x2408: 0x0001, 0x2409: 0x0001, 0x240a: 0x0001, 0x240b: 0x0001, + 0x240c: 0x0001, 0x240d: 0x0001, 0x240e: 0x0001, 0x240f: 0x0001, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 
0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, + 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, + 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000d, 0x2451: 0x000d, + 0x2452: 0x000d, 0x2453: 0x000d, 0x2454: 0x000d, 0x2455: 0x000d, 0x2456: 0x000d, 0x2457: 0x000d, + 0x2458: 0x000d, 0x2459: 0x000d, 0x245a: 0x000d, 0x245b: 0x000d, 0x245c: 0x000d, 0x245d: 0x000d, + 0x245e: 0x000d, 0x245f: 0x000d, 0x2460: 0x000d, 0x2461: 0x000d, 0x2462: 0x000d, 0x2463: 0x000d, + 0x2464: 0x000d, 0x2465: 0x000d, 0x2466: 0x000d, 0x2467: 0x000d, 0x2468: 0x000d, 0x2469: 0x000d, + 0x246a: 0x000d, 0x246b: 0x000d, 0x246c: 0x000d, 0x246d: 0x000d, 0x246e: 0x000d, 0x246f: 0x000d, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000d, 0x2490: 0x000b, 0x2491: 0x000b, + 0x2492: 0x000b, 0x2493: 0x000b, 0x2494: 0x000b, 0x2495: 0x000b, 0x2496: 0x000b, 0x2497: 0x000b, + 0x2498: 0x000b, 0x2499: 0x000b, 0x249a: 0x000b, 0x249b: 0x000b, 0x249c: 0x000b, 0x249d: 0x000b, + 0x249e: 0x000b, 0x249f: 0x000b, 0x24a0: 0x000b, 0x24a1: 0x000b, 0x24a2: 0x000b, 0x24a3: 0x000b, + 0x24a4: 0x000b, 0x24a5: 0x000b, 0x24a6: 0x000b, 0x24a7: 0x000b, 0x24a8: 0x000b, 0x24a9: 0x000b, + 0x24aa: 0x000b, 0x24ab: 0x000b, 0x24ac: 0x000b, 0x24ad: 0x000b, 0x24ae: 0x000b, 0x24af: 0x000b, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000a, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000c, 0x24c1: 0x000c, 0x24c2: 0x000c, 0x24c3: 0x000c, 0x24c4: 0x000c, 0x24c5: 0x000c, + 0x24c6: 0x000c, 0x24c7: 0x000c, 0x24c8: 0x000c, 0x24c9: 0x000c, 0x24ca: 0x000c, 0x24cb: 0x000c, + 0x24cc: 0x000c, 0x24cd: 0x000c, 0x24ce: 0x000c, 0x24cf: 0x000c, 0x24d0: 0x000a, 0x24d1: 0x000a, + 0x24d2: 0x000a, 0x24d3: 0x000a, 0x24d4: 0x000a, 0x24d5: 0x000a, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x000a, + 0x24e0: 0x000c, 0x24e1: 0x000c, 0x24e2: 0x000c, 0x24e3: 0x000c, + 0x24e4: 0x000c, 0x24e5: 0x000c, 0x24e6: 0x000c, 0x24e7: 0x000c, 0x24e8: 0x000c, 0x24e9: 0x000c, + 0x24ea: 0x000c, 0x24eb: 0x000c, 0x24ec: 0x000c, 0x24ed: 0x000c, 0x24ee: 0x000c, 0x24ef: 0x000c, + 0x24f0: 0x000a, 0x24f1: 0x000a, 0x24f2: 0x000a, 0x24f3: 0x000a, 0x24f4: 0x000a, 0x24f5: 0x000a, + 0x24f6: 0x000a, 0x24f7: 0x000a, 0x24f8: 0x000a, 0x24f9: 0x000a, 0x24fa: 0x000a, 0x24fb: 0x000a, + 0x24fc: 0x000a, 0x24fd: 0x000a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x000a, 0x2504: 0x000a, 0x2505: 0x000a, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x000a, 0x2509: 0x000a, 0x250a: 0x000a, 0x250b: 0x000a, + 0x250c: 0x000a, 0x250d: 0x000a, 0x250e: 0x000a, 0x250f: 0x000a, 0x2510: 0x0006, 0x2511: 0x000a, + 0x2512: 0x0006, 
0x2514: 0x000a, 0x2515: 0x0006, 0x2516: 0x000a, 0x2517: 0x000a, + 0x2518: 0x000a, 0x2519: 0x009a, 0x251a: 0x008a, 0x251b: 0x007a, 0x251c: 0x006a, 0x251d: 0x009a, + 0x251e: 0x008a, 0x251f: 0x0004, 0x2520: 0x000a, 0x2521: 0x000a, 0x2522: 0x0003, 0x2523: 0x0003, + 0x2524: 0x000a, 0x2525: 0x000a, 0x2526: 0x000a, 0x2528: 0x000a, 0x2529: 0x0004, + 0x252a: 0x0004, 0x252b: 0x000a, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000d, + // Block 0x95, offset 0x2540 + 0x2540: 0x000d, 0x2541: 0x000d, 0x2542: 0x000d, 0x2543: 0x000d, 0x2544: 0x000d, 0x2545: 0x000d, + 0x2546: 0x000d, 0x2547: 0x000d, 0x2548: 0x000d, 0x2549: 0x000d, 0x254a: 0x000d, 0x254b: 0x000d, + 0x254c: 0x000d, 0x254d: 0x000d, 0x254e: 0x000d, 0x254f: 0x000d, 0x2550: 0x000d, 0x2551: 0x000d, + 0x2552: 0x000d, 0x2553: 0x000d, 0x2554: 0x000d, 0x2555: 0x000d, 0x2556: 0x000d, 0x2557: 0x000d, + 0x2558: 0x000d, 0x2559: 0x000d, 0x255a: 0x000d, 0x255b: 0x000d, 0x255c: 0x000d, 0x255d: 0x000d, + 0x255e: 0x000d, 0x255f: 0x000d, 0x2560: 0x000d, 0x2561: 0x000d, 0x2562: 0x000d, 0x2563: 0x000d, + 0x2564: 0x000d, 0x2565: 0x000d, 0x2566: 0x000d, 0x2567: 0x000d, 0x2568: 0x000d, 0x2569: 0x000d, + 0x256a: 0x000d, 0x256b: 0x000d, 0x256c: 0x000d, 0x256d: 0x000d, 0x256e: 0x000d, 0x256f: 0x000d, + 0x2570: 0x000d, 0x2571: 0x000d, 0x2572: 0x000d, 0x2573: 0x000d, 0x2574: 0x000d, 0x2575: 0x000d, + 0x2576: 0x000d, 0x2577: 0x000d, 0x2578: 0x000d, 0x2579: 0x000d, 0x257a: 0x000d, 0x257b: 0x000d, + 0x257c: 0x000d, 0x257d: 0x000d, 0x257e: 0x000d, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, 0x2582: 0x000a, 0x2583: 0x0004, 0x2584: 0x0004, 0x2585: 0x0004, + 0x2586: 0x000a, 0x2587: 0x000a, 0x2588: 0x003a, 0x2589: 0x002a, 0x258a: 0x000a, 0x258b: 0x0003, + 0x258c: 0x0006, 0x258d: 0x0003, 0x258e: 0x0006, 0x258f: 0x0006, 0x2590: 0x0002, 0x2591: 0x0002, + 0x2592: 0x0002, 0x2593: 0x0002, 0x2594: 0x0002, 0x2595: 0x0002, 0x2596: 0x0002, 0x2597: 0x0002, + 0x2598: 0x0002, 0x2599: 0x0002, 0x259a: 0x0006, 0x259b: 0x000a, 0x259c: 0x000a, 0x259d: 0x000a, + 0x259e: 0x000a, 0x259f: 0x000a, 0x25a0: 0x000a, + 0x25bb: 0x005a, + 0x25bc: 0x000a, 0x25bd: 0x004a, 0x25be: 0x000a, 0x25bf: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, + 0x25db: 0x005a, 0x25dc: 0x000a, 0x25dd: 0x004a, + 0x25de: 0x000a, 0x25df: 0x00fa, 0x25e0: 0x00ea, 0x25e1: 0x000a, 0x25e2: 0x003a, 0x25e3: 0x002a, + 0x25e4: 0x000a, 0x25e5: 0x000a, + // Block 0x98, offset 0x2600 + 0x2620: 0x0004, 0x2621: 0x0004, 0x2622: 0x000a, 0x2623: 0x000a, + 0x2624: 0x000a, 0x2625: 0x0004, 0x2626: 0x0004, 0x2628: 0x000a, 0x2629: 0x000a, + 0x262a: 0x000a, 0x262b: 0x000a, 0x262c: 0x000a, 0x262d: 0x000a, 0x262e: 0x000a, + 0x2630: 0x000b, 0x2631: 0x000b, 0x2632: 0x000b, 0x2633: 0x000b, 0x2634: 0x000b, 0x2635: 0x000b, + 0x2636: 0x000b, 0x2637: 0x000b, 0x2638: 0x000b, 0x2639: 0x000a, 0x263a: 0x000a, 0x263b: 0x000a, + 0x263c: 0x000a, 0x263d: 0x000a, 0x263e: 0x000b, 0x263f: 0x000b, + // Block 0x99, offset 0x2640 + 0x2641: 0x000a, + // Block 0x9a, offset 0x2680 + 0x2680: 0x000a, 0x2681: 0x000a, 0x2682: 0x000a, 0x2683: 0x000a, 0x2684: 0x000a, 0x2685: 0x000a, + 0x2686: 0x000a, 0x2687: 0x000a, 0x2688: 0x000a, 0x2689: 0x000a, 0x268a: 0x000a, 0x268b: 0x000a, + 0x268c: 0x000a, 0x2690: 0x000a, 0x2691: 0x000a, + 0x2692: 0x000a, 0x2693: 0x000a, 0x2694: 0x000a, 0x2695: 0x000a, 0x2696: 0x000a, 0x2697: 0x000a, + 0x2698: 0x000a, 
0x2699: 0x000a, 0x269a: 0x000a, 0x269b: 0x000a, 0x269c: 0x000a, + 0x26a0: 0x000a, + // Block 0x9b, offset 0x26c0 + 0x26fd: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2720: 0x000c, 0x2721: 0x0002, 0x2722: 0x0002, 0x2723: 0x0002, + 0x2724: 0x0002, 0x2725: 0x0002, 0x2726: 0x0002, 0x2727: 0x0002, 0x2728: 0x0002, 0x2729: 0x0002, + 0x272a: 0x0002, 0x272b: 0x0002, 0x272c: 0x0002, 0x272d: 0x0002, 0x272e: 0x0002, 0x272f: 0x0002, + 0x2730: 0x0002, 0x2731: 0x0002, 0x2732: 0x0002, 0x2733: 0x0002, 0x2734: 0x0002, 0x2735: 0x0002, + 0x2736: 0x0002, 0x2737: 0x0002, 0x2738: 0x0002, 0x2739: 0x0002, 0x273a: 0x0002, 0x273b: 0x0002, + // Block 0x9d, offset 0x2740 + 0x2776: 0x000c, 0x2777: 0x000c, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x000a, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x000c, 0x2802: 0x000c, 0x2803: 0x000c, 0x2804: 0x0001, 0x2805: 0x000c, + 0x2806: 0x000c, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x000c, 0x280d: 0x000c, 0x280e: 0x000c, 0x280f: 0x000c, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 
0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x000c, 0x2839: 0x000c, 0x283a: 0x000c, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x000c, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x0001, 0x287a: 0x0001, 0x287b: 0x0001, + 0x287c: 0x0001, 0x287d: 0x0001, 0x287e: 0x0001, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x0001, 0x28b1: 0x0001, 0x28b2: 0x0001, 0x28b3: 0x0001, 0x28b4: 0x0001, 0x28b5: 0x0001, + 0x28b6: 0x0001, 0x28b7: 0x0001, 0x28b8: 0x0001, 0x28b9: 0x000a, 0x28ba: 0x000a, 0x28bb: 0x000a, + 0x28bc: 0x000a, 0x28bd: 0x000a, 0x28be: 0x000a, 0x28bf: 0x000a, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000d, 0x28c7: 0x000d, 0x28c8: 0x000d, 0x28c9: 0x000d, 0x28ca: 0x000d, 0x28cb: 0x000d, + 0x28cc: 0x000d, 0x28cd: 0x000d, 0x28ce: 0x000d, 0x28cf: 0x000d, 0x28d0: 0x000d, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000c, 0x28e5: 0x000c, 0x28e6: 0x000c, 0x28e7: 0x000c, 0x28e8: 0x000d, 0x28e9: 0x000d, + 0x28ea: 0x000d, 0x28eb: 0x000d, 0x28ec: 0x000d, 0x28ed: 0x000d, 0x28ee: 0x000d, 0x28ef: 0x000d, + 0x28f0: 0x0005, 0x28f1: 0x0005, 0x28f2: 0x0005, 0x28f3: 0x0005, 0x28f4: 0x0005, 0x28f5: 0x0005, + 0x28f6: 0x0005, 0x28f7: 0x0005, 0x28f8: 0x0005, 0x28f9: 0x0005, 
0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x0001, 0x2901: 0x0001, 0x2902: 0x0001, 0x2903: 0x0001, 0x2904: 0x0001, 0x2905: 0x0001, + 0x2906: 0x0001, 0x2907: 0x0001, 0x2908: 0x0001, 0x2909: 0x0001, 0x290a: 0x0001, 0x290b: 0x0001, + 0x290c: 0x0001, 0x290d: 0x0001, 0x290e: 0x0001, 0x290f: 0x0001, 0x2910: 0x0001, 0x2911: 0x0001, + 0x2912: 0x0001, 0x2913: 0x0001, 0x2914: 0x0001, 0x2915: 0x0001, 0x2916: 0x0001, 0x2917: 0x0001, + 0x2918: 0x0001, 0x2919: 0x0001, 0x291a: 0x0001, 0x291b: 0x0001, 0x291c: 0x0001, 0x291d: 0x0001, + 0x291e: 0x0001, 0x291f: 0x0001, 0x2920: 0x0005, 0x2921: 0x0005, 0x2922: 0x0005, 0x2923: 0x0005, + 0x2924: 0x0005, 0x2925: 0x0005, 0x2926: 0x0005, 0x2927: 0x0005, 0x2928: 0x0005, 0x2929: 0x0005, + 0x292a: 0x0005, 0x292b: 0x0005, 0x292c: 0x0005, 0x292d: 0x0005, 0x292e: 0x0005, 0x292f: 0x0005, + 0x2930: 0x0005, 0x2931: 0x0005, 0x2932: 0x0005, 0x2933: 0x0005, 0x2934: 0x0005, 0x2935: 0x0005, + 0x2936: 0x0005, 0x2937: 0x0005, 0x2938: 0x0005, 0x2939: 0x0005, 0x293a: 0x0005, 0x293b: 0x0005, + 0x293c: 0x0005, 0x293d: 0x0005, 0x293e: 0x0005, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2940: 0x0001, 0x2941: 0x0001, 0x2942: 0x0001, 0x2943: 0x0001, 0x2944: 0x0001, 0x2945: 0x0001, + 0x2946: 0x0001, 0x2947: 0x0001, 0x2948: 0x0001, 0x2949: 0x0001, 0x294a: 0x0001, 0x294b: 0x0001, + 0x294c: 0x0001, 0x294d: 0x0001, 0x294e: 0x0001, 0x294f: 0x0001, 0x2950: 0x0001, 0x2951: 0x0001, + 0x2952: 0x0001, 0x2953: 0x0001, 0x2954: 0x0001, 0x2955: 0x0001, 0x2956: 0x0001, 0x2957: 0x0001, + 0x2958: 0x0001, 0x2959: 0x0001, 0x295a: 0x0001, 0x295b: 0x0001, 0x295c: 0x0001, 0x295d: 0x0001, + 0x295e: 0x0001, 0x295f: 0x0001, 0x2960: 0x0001, 0x2961: 0x0001, 0x2962: 0x0001, 0x2963: 0x0001, + 0x2964: 0x0001, 0x2965: 0x0001, 0x2966: 0x0001, 0x2967: 0x0001, 0x2968: 0x0001, 0x2969: 0x0001, + 0x296a: 0x0001, 0x296b: 0x000c, 0x296c: 0x000c, 0x296d: 0x0001, 0x296e: 0x0001, 0x296f: 0x0001, + 0x2970: 0x0001, 0x2971: 0x0001, 0x2972: 0x0001, 0x2973: 0x0001, 0x2974: 0x0001, 0x2975: 0x0001, + 0x2976: 0x0001, 0x2977: 0x0001, 0x2978: 0x0001, 0x2979: 0x0001, 0x297a: 0x0001, 0x297b: 0x0001, + 0x297c: 0x0001, 0x297d: 0x0001, 0x297e: 0x0001, 0x297f: 0x0001, + // Block 0xa6, offset 0x2980 + 0x2980: 0x0001, 0x2981: 0x0001, 0x2982: 0x0001, 0x2983: 0x0001, 0x2984: 0x0001, 0x2985: 0x0001, + 0x2986: 0x0001, 0x2987: 0x0001, 0x2988: 0x0001, 0x2989: 0x0001, 0x298a: 0x0001, 0x298b: 0x0001, + 0x298c: 0x0001, 0x298d: 0x0001, 0x298e: 0x0001, 0x298f: 0x0001, 0x2990: 0x0001, 0x2991: 0x0001, + 0x2992: 0x0001, 0x2993: 0x0001, 0x2994: 0x0001, 0x2995: 0x0001, 0x2996: 0x0001, 0x2997: 0x0001, + 0x2998: 0x0001, 0x2999: 0x0001, 0x299a: 0x0001, 0x299b: 0x0001, 0x299c: 0x0001, 0x299d: 0x0001, + 0x299e: 0x0001, 0x299f: 0x0001, 0x29a0: 0x0001, 0x29a1: 0x0001, 0x29a2: 0x0001, 0x29a3: 0x0001, + 0x29a4: 0x0001, 0x29a5: 0x0001, 0x29a6: 0x0001, 0x29a7: 0x0001, 0x29a8: 0x0001, 0x29a9: 0x0001, + 0x29aa: 0x0001, 0x29ab: 0x0001, 0x29ac: 0x0001, 0x29ad: 0x0001, 0x29ae: 0x0001, 0x29af: 0x0001, + 0x29b0: 0x000d, 0x29b1: 0x000d, 0x29b2: 0x000d, 0x29b3: 0x000d, 0x29b4: 0x000d, 0x29b5: 0x000d, + 0x29b6: 0x000d, 0x29b7: 0x000d, 0x29b8: 0x000d, 0x29b9: 0x000d, 0x29ba: 0x000d, 0x29bb: 0x000d, + 0x29bc: 0x000d, 0x29bd: 0x000d, 0x29be: 0x000d, 0x29bf: 0x000d, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000d, 0x29c1: 0x000d, 0x29c2: 0x000d, 0x29c3: 0x000d, 0x29c4: 0x000d, 0x29c5: 0x000d, + 0x29c6: 0x000c, 0x29c7: 0x000c, 0x29c8: 0x000c, 0x29c9: 0x000c, 0x29ca: 0x000c, 0x29cb: 
0x000c, + 0x29cc: 0x000c, 0x29cd: 0x000c, 0x29ce: 0x000c, 0x29cf: 0x000c, 0x29d0: 0x000c, 0x29d1: 0x000d, + 0x29d2: 0x000d, 0x29d3: 0x000d, 0x29d4: 0x000d, 0x29d5: 0x000d, 0x29d6: 0x000d, 0x29d7: 0x000d, + 0x29d8: 0x000d, 0x29d9: 0x000d, 0x29da: 0x000d, 0x29db: 0x000d, 0x29dc: 0x000d, 0x29dd: 0x000d, + 0x29de: 0x000d, 0x29df: 0x000d, 0x29e0: 0x000d, 0x29e1: 0x000d, 0x29e2: 0x000d, 0x29e3: 0x000d, + 0x29e4: 0x000d, 0x29e5: 0x000d, 0x29e6: 0x000d, 0x29e7: 0x000d, 0x29e8: 0x000d, 0x29e9: 0x000d, + 0x29ea: 0x000d, 0x29eb: 0x000d, 0x29ec: 0x000d, 0x29ed: 0x000d, 0x29ee: 0x000d, 0x29ef: 0x000d, + 0x29f0: 0x0001, 0x29f1: 0x0001, 0x29f2: 0x0001, 0x29f3: 0x0001, 0x29f4: 0x0001, 0x29f5: 0x0001, + 0x29f6: 0x0001, 0x29f7: 0x0001, 0x29f8: 0x0001, 0x29f9: 0x0001, 0x29fa: 0x0001, 0x29fb: 0x0001, + 0x29fc: 0x0001, 0x29fd: 0x0001, 0x29fe: 0x0001, 0x29ff: 0x0001, + // Block 0xa8, offset 0x2a00 + 0x2a01: 0x000c, + 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 0x000c, 0x2a3b: 0x000c, + 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, 0x2a3f: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x000c, 0x2a41: 0x000c, 0x2a42: 0x000c, 0x2a43: 0x000c, 0x2a44: 0x000c, 0x2a45: 0x000c, + 0x2a46: 0x000c, + 0x2a52: 0x000a, 0x2a53: 0x000a, 0x2a54: 0x000a, 0x2a55: 0x000a, 0x2a56: 0x000a, 0x2a57: 0x000a, + 0x2a58: 0x000a, 0x2a59: 0x000a, 0x2a5a: 0x000a, 0x2a5b: 0x000a, 0x2a5c: 0x000a, 0x2a5d: 0x000a, + 0x2a5e: 0x000a, 0x2a5f: 0x000a, 0x2a60: 0x000a, 0x2a61: 0x000a, 0x2a62: 0x000a, 0x2a63: 0x000a, + 0x2a64: 0x000a, 0x2a65: 0x000a, + 0x2a7f: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab3: 0x000c, 0x2ab4: 0x000c, 0x2ab5: 0x000c, + 0x2ab6: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, 0x2ac2: 0x000c, + 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, + 0x2aea: 0x000c, 0x2aeb: 0x000c, 0x2aed: 0x000c, 0x2aee: 0x000c, 0x2aef: 0x000c, + 0x2af0: 0x000c, 0x2af1: 0x000c, 0x2af2: 0x000c, 0x2af3: 0x000c, 0x2af4: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b33: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, 0x2b41: 0x000c, + 0x2b76: 0x000c, 0x2b77: 0x000c, 0x2b78: 0x000c, 0x2b79: 0x000c, 0x2b7a: 0x000c, 0x2b7b: 0x000c, + 0x2b7c: 0x000c, 0x2b7d: 0x000c, 0x2b7e: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b89: 0x000c, 0x2b8a: 0x000c, 0x2b8b: 0x000c, + 0x2b8c: 0x000c, 0x2b8f: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bef: 0x000c, + 0x2bf0: 0x000c, 0x2bf1: 0x000c, 0x2bf4: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, + 0x2bfe: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c1f: 0x000c, 0x2c23: 0x000c, + 0x2c24: 0x000c, 0x2c25: 0x000c, 0x2c26: 0x000c, 0x2c27: 0x000c, 0x2c28: 0x000c, 0x2c29: 0x000c, + 0x2c2a: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, + 0x2c66: 0x000c, 0x2c67: 0x000c, 0x2c68: 0x000c, 0x2c69: 0x000c, + 0x2c6a: 0x000c, 0x2c6b: 0x000c, 0x2c6c: 0x000c, + 0x2c70: 0x000c, 0x2c71: 0x000c, 0x2c72: 0x000c, 0x2c73: 0x000c, 0x2c74: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb8: 0x000c, 0x2cb9: 0x000c, 0x2cba: 0x000c, 0x2cbb: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbe: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc2: 0x000c, 0x2cc3: 0x000c, 0x2cc4: 0x000c, + 0x2cc6: 0x000c, + 0x2cde: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d3a: 0x000c, + 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, 0x2d42: 0x000c, 0x2d43: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 
0x2db5: 0x000c, + 0x2dbc: 0x000c, 0x2dbd: 0x000c, 0x2dbf: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x000c, + 0x2ddc: 0x000c, 0x2ddd: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e37: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, + 0x2e3d: 0x000c, 0x2e3f: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x000c, + 0x2e60: 0x000a, 0x2e61: 0x000a, 0x2e62: 0x000a, 0x2e63: 0x000a, + 0x2e64: 0x000a, 0x2e65: 0x000a, 0x2e66: 0x000a, 0x2e67: 0x000a, 0x2e68: 0x000a, 0x2e69: 0x000a, + 0x2e6a: 0x000a, 0x2e6b: 0x000a, 0x2e6c: 0x000a, + // Block 0xba, offset 0x2e80 + 0x2eab: 0x000c, 0x2ead: 0x000c, + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb7: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2edd: 0x000c, + 0x2ede: 0x000c, 0x2edf: 0x000c, 0x2ee2: 0x000c, 0x2ee3: 0x000c, + 0x2ee4: 0x000c, 0x2ee5: 0x000c, 0x2ee7: 0x000c, 0x2ee8: 0x000c, 0x2ee9: 0x000c, + 0x2eea: 0x000c, 0x2eeb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f2f: 0x000c, + 0x2f30: 0x000c, 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f36: 0x000c, 0x2f37: 0x000c, 0x2f39: 0x000c, 0x2f3a: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7e: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f83: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2fd4: 0x000c, 0x2fd5: 0x000c, 0x2fd6: 0x000c, 0x2fd7: 0x000c, + 0x2fda: 0x000c, 0x2fdb: 0x000c, + 0x2fe0: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3006: 0x000c, 0x3009: 0x000c, 0x300a: 0x000c, + 0x3033: 0x000c, 0x3034: 0x000c, 0x3035: 0x000c, + 0x3036: 0x000c, 0x3037: 0x000c, 0x3038: 0x000c, 0x303b: 0x000c, + 0x303c: 0x000c, 0x303d: 0x000c, 0x303e: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3047: 0x000c, + 0x3051: 0x000c, + 0x3052: 0x000c, 0x3053: 0x000c, 0x3054: 0x000c, 0x3055: 0x000c, 0x3056: 0x000c, + 0x3059: 0x000c, 0x305a: 0x000c, 0x305b: 0x000c, + // Block 0xc2, offset 0x3080 + 0x308a: 0x000c, 0x308b: 0x000c, + 0x308c: 0x000c, 0x308d: 0x000c, 0x308e: 0x000c, 0x308f: 0x000c, 0x3090: 0x000c, 0x3091: 0x000c, + 0x3092: 0x000c, 0x3093: 0x000c, 0x3094: 0x000c, 0x3095: 0x000c, 0x3096: 0x000c, + 0x3098: 0x000c, 0x3099: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, 0x30f5: 0x000c, + 0x30f6: 0x000c, 0x30f8: 0x000c, 0x30f9: 0x000c, 0x30fa: 0x000c, 0x30fb: 0x000c, + 0x30fc: 0x000c, 0x30fd: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3112: 0x000c, 0x3113: 0x000c, 0x3114: 0x000c, 0x3115: 0x000c, 0x3116: 0x000c, 0x3117: 0x000c, + 0x3118: 0x000c, 0x3119: 0x000c, 0x311a: 0x000c, 0x311b: 0x000c, 0x311c: 0x000c, 0x311d: 0x000c, + 0x311e: 0x000c, 0x311f: 0x000c, 0x3120: 0x000c, 0x3121: 0x000c, 0x3122: 0x000c, 0x3123: 0x000c, + 0x3124: 0x000c, 0x3125: 0x000c, 0x3126: 0x000c, 0x3127: 0x000c, + 0x312a: 0x000c, 0x312b: 0x000c, 0x312c: 0x000c, 0x312d: 0x000c, 0x312e: 0x000c, 0x312f: 0x000c, + 0x3130: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, 0x317a: 0x000c, + 0x317c: 0x000c, 0x317d: 0x000c, 0x317f: 0x000c, + // Block 0xc6, offset 0x3180 + 0x3180: 0x000c, 0x3181: 0x000c, 0x3182: 0x000c, 0x3183: 0x000c, 0x3184: 0x000c, 0x3185: 0x000c, + 0x3187: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31d0: 0x000c, 0x31d1: 0x000c, + 
0x31d5: 0x000c, 0x31d7: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3233: 0x000c, 0x3234: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3255: 0x000a, 0x3256: 0x000a, 0x3257: 0x000a, + 0x3258: 0x000a, 0x3259: 0x000a, 0x325a: 0x000a, 0x325b: 0x000a, 0x325c: 0x000a, 0x325d: 0x0004, + 0x325e: 0x0004, 0x325f: 0x0004, 0x3260: 0x0004, 0x3261: 0x000a, 0x3262: 0x000a, 0x3263: 0x000a, + 0x3264: 0x000a, 0x3265: 0x000a, 0x3266: 0x000a, 0x3267: 0x000a, 0x3268: 0x000a, 0x3269: 0x000a, + 0x326a: 0x000a, 0x326b: 0x000a, 0x326c: 0x000a, 0x326d: 0x000a, 0x326e: 0x000a, 0x326f: 0x000a, + 0x3270: 0x000a, 0x3271: 0x000a, + // Block 0xca, offset 0x3280 + 0x32b0: 0x000c, 0x32b1: 0x000c, 0x32b2: 0x000c, 0x32b3: 0x000c, 0x32b4: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32f0: 0x000c, 0x32f1: 0x000c, 0x32f2: 0x000c, 0x32f3: 0x000c, 0x32f4: 0x000c, 0x32f5: 0x000c, + 0x32f6: 0x000c, + // Block 0xcc, offset 0x3300 + 0x330f: 0x000c, + // Block 0xcd, offset 0x3340 + 0x334f: 0x000c, 0x3350: 0x000c, 0x3351: 0x000c, + 0x3352: 0x000c, + // Block 0xce, offset 0x3380 + 0x33a2: 0x000a, + 0x33a4: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33dd: 0x000c, + 0x33de: 0x000c, 0x33e0: 0x000b, 0x33e1: 0x000b, 0x33e2: 0x000b, 0x33e3: 0x000b, + // Block 0xd0, offset 0x3400 + 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x3433: 0x000b, 0x3434: 0x000b, 0x3435: 0x000b, + 0x3436: 0x000b, 0x3437: 0x000b, 0x3438: 0x000b, 0x3439: 0x000b, 0x343a: 0x000b, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, 0x346d: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3480: 0x000a, 0x3481: 0x000a, 0x3482: 0x000c, 0x3483: 0x000c, 0x3484: 0x000c, 0x3485: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, 0x34d3: 0x000a, 0x34d4: 0x000a, 0x34d5: 0x000a, 0x34d6: 0x000a, + // Block 0xd4, offset 0x3500 + 0x351b: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3555: 0x000a, + // Block 0xd6, offset 0x3580 + 0x358f: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35c9: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3603: 0x000a, + 0x360e: 0x0002, 0x360f: 0x0002, 0x3610: 0x0002, 0x3611: 0x0002, + 0x3612: 0x0002, 0x3613: 0x0002, 0x3614: 0x0002, 0x3615: 0x0002, 0x3616: 0x0002, 0x3617: 0x0002, + 0x3618: 0x0002, 0x3619: 0x0002, 0x361a: 0x0002, 0x361b: 0x0002, 0x361c: 0x0002, 0x361d: 0x0002, + 0x361e: 0x0002, 0x361f: 0x0002, 0x3620: 0x0002, 0x3621: 0x0002, 0x3622: 0x0002, 0x3623: 0x0002, + 0x3624: 0x0002, 0x3625: 0x0002, 0x3626: 0x0002, 0x3627: 0x0002, 0x3628: 0x0002, 0x3629: 0x0002, + 0x362a: 0x0002, 0x362b: 0x0002, 0x362c: 0x0002, 0x362d: 0x0002, 0x362e: 0x0002, 0x362f: 0x0002, + 0x3630: 0x0002, 0x3631: 0x0002, 0x3632: 0x0002, 0x3633: 0x0002, 0x3634: 0x0002, 0x3635: 0x0002, + 0x3636: 0x0002, 0x3637: 0x0002, 0x3638: 0x0002, 0x3639: 0x0002, 0x363a: 0x0002, 0x363b: 0x0002, + 0x363c: 0x0002, 0x363d: 0x0002, 0x363e: 0x0002, 0x363f: 0x0002, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000c, 0x3641: 0x000c, 0x3642: 0x000c, 0x3643: 0x000c, 0x3644: 0x000c, 0x3645: 0x000c, + 0x3646: 0x000c, 0x3647: 0x000c, 0x3648: 0x000c, 0x3649: 
0x000c, 0x364a: 0x000c, 0x364b: 0x000c, + 0x364c: 0x000c, 0x364d: 0x000c, 0x364e: 0x000c, 0x364f: 0x000c, 0x3650: 0x000c, 0x3651: 0x000c, + 0x3652: 0x000c, 0x3653: 0x000c, 0x3654: 0x000c, 0x3655: 0x000c, 0x3656: 0x000c, 0x3657: 0x000c, + 0x3658: 0x000c, 0x3659: 0x000c, 0x365a: 0x000c, 0x365b: 0x000c, 0x365c: 0x000c, 0x365d: 0x000c, + 0x365e: 0x000c, 0x365f: 0x000c, 0x3660: 0x000c, 0x3661: 0x000c, 0x3662: 0x000c, 0x3663: 0x000c, + 0x3664: 0x000c, 0x3665: 0x000c, 0x3666: 0x000c, 0x3667: 0x000c, 0x3668: 0x000c, 0x3669: 0x000c, + 0x366a: 0x000c, 0x366b: 0x000c, 0x366c: 0x000c, 0x366d: 0x000c, 0x366e: 0x000c, 0x366f: 0x000c, + 0x3670: 0x000c, 0x3671: 0x000c, 0x3672: 0x000c, 0x3673: 0x000c, 0x3674: 0x000c, 0x3675: 0x000c, + 0x3676: 0x000c, 0x367b: 0x000c, + 0x367c: 0x000c, 0x367d: 0x000c, 0x367e: 0x000c, 0x367f: 0x000c, + // Block 0xda, offset 0x3680 + 0x3680: 0x000c, 0x3681: 0x000c, 0x3682: 0x000c, 0x3683: 0x000c, 0x3684: 0x000c, 0x3685: 0x000c, + 0x3686: 0x000c, 0x3687: 0x000c, 0x3688: 0x000c, 0x3689: 0x000c, 0x368a: 0x000c, 0x368b: 0x000c, + 0x368c: 0x000c, 0x368d: 0x000c, 0x368e: 0x000c, 0x368f: 0x000c, 0x3690: 0x000c, 0x3691: 0x000c, + 0x3692: 0x000c, 0x3693: 0x000c, 0x3694: 0x000c, 0x3695: 0x000c, 0x3696: 0x000c, 0x3697: 0x000c, + 0x3698: 0x000c, 0x3699: 0x000c, 0x369a: 0x000c, 0x369b: 0x000c, 0x369c: 0x000c, 0x369d: 0x000c, + 0x369e: 0x000c, 0x369f: 0x000c, 0x36a0: 0x000c, 0x36a1: 0x000c, 0x36a2: 0x000c, 0x36a3: 0x000c, + 0x36a4: 0x000c, 0x36a5: 0x000c, 0x36a6: 0x000c, 0x36a7: 0x000c, 0x36a8: 0x000c, 0x36a9: 0x000c, + 0x36aa: 0x000c, 0x36ab: 0x000c, 0x36ac: 0x000c, + 0x36b5: 0x000c, + // Block 0xdb, offset 0x36c0 + 0x36c4: 0x000c, + 0x36db: 0x000c, 0x36dc: 0x000c, 0x36dd: 0x000c, + 0x36de: 0x000c, 0x36df: 0x000c, 0x36e1: 0x000c, 0x36e2: 0x000c, 0x36e3: 0x000c, + 0x36e4: 0x000c, 0x36e5: 0x000c, 0x36e6: 0x000c, 0x36e7: 0x000c, 0x36e8: 0x000c, 0x36e9: 0x000c, + 0x36ea: 0x000c, 0x36eb: 0x000c, 0x36ec: 0x000c, 0x36ed: 0x000c, 0x36ee: 0x000c, 0x36ef: 0x000c, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000c, 0x3701: 0x000c, 0x3702: 0x000c, 0x3703: 0x000c, 0x3704: 0x000c, 0x3705: 0x000c, + 0x3706: 0x000c, 0x3708: 0x000c, 0x3709: 0x000c, 0x370a: 0x000c, 0x370b: 0x000c, + 0x370c: 0x000c, 0x370d: 0x000c, 0x370e: 0x000c, 0x370f: 0x000c, 0x3710: 0x000c, 0x3711: 0x000c, + 0x3712: 0x000c, 0x3713: 0x000c, 0x3714: 0x000c, 0x3715: 0x000c, 0x3716: 0x000c, 0x3717: 0x000c, + 0x3718: 0x000c, 0x371b: 0x000c, 0x371c: 0x000c, 0x371d: 0x000c, + 0x371e: 0x000c, 0x371f: 0x000c, 0x3720: 0x000c, 0x3721: 0x000c, 0x3723: 0x000c, + 0x3724: 0x000c, 0x3726: 0x000c, 0x3727: 0x000c, 0x3728: 0x000c, 0x3729: 0x000c, + 0x372a: 0x000c, + // Block 0xdd, offset 0x3740 + 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, + 0x377f: 0x0004, + // Block 0xde, offset 0x3780 + 0x3780: 0x0001, 0x3781: 0x0001, 0x3782: 0x0001, 0x3783: 0x0001, 0x3784: 0x0001, 0x3785: 0x0001, + 0x3786: 0x0001, 0x3787: 0x0001, 0x3788: 0x0001, 0x3789: 0x0001, 0x378a: 0x0001, 0x378b: 0x0001, + 0x378c: 0x0001, 0x378d: 0x0001, 0x378e: 0x0001, 0x378f: 0x0001, 0x3790: 0x000c, 0x3791: 0x000c, + 0x3792: 0x000c, 0x3793: 0x000c, 0x3794: 0x000c, 0x3795: 0x000c, 0x3796: 0x000c, 0x3797: 0x0001, + 0x3798: 0x0001, 0x3799: 0x0001, 0x379a: 0x0001, 0x379b: 0x0001, 0x379c: 0x0001, 0x379d: 0x0001, + 0x379e: 0x0001, 0x379f: 0x0001, 0x37a0: 0x0001, 0x37a1: 0x0001, 0x37a2: 0x0001, 0x37a3: 0x0001, + 0x37a4: 0x0001, 0x37a5: 0x0001, 0x37a6: 0x0001, 0x37a7: 0x0001, 0x37a8: 0x0001, 0x37a9: 0x0001, + 0x37aa: 0x0001, 0x37ab: 0x0001, 0x37ac: 0x0001, 0x37ad: 0x0001, 0x37ae: 
0x0001, 0x37af: 0x0001, + 0x37b0: 0x0001, 0x37b1: 0x0001, 0x37b2: 0x0001, 0x37b3: 0x0001, 0x37b4: 0x0001, 0x37b5: 0x0001, + 0x37b6: 0x0001, 0x37b7: 0x0001, 0x37b8: 0x0001, 0x37b9: 0x0001, 0x37ba: 0x0001, 0x37bb: 0x0001, + 0x37bc: 0x0001, 0x37bd: 0x0001, 0x37be: 0x0001, 0x37bf: 0x0001, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x0001, 0x37c1: 0x0001, 0x37c2: 0x0001, 0x37c3: 0x0001, 0x37c4: 0x000c, 0x37c5: 0x000c, + 0x37c6: 0x000c, 0x37c7: 0x000c, 0x37c8: 0x000c, 0x37c9: 0x000c, 0x37ca: 0x000c, 0x37cb: 0x0001, + 0x37cc: 0x0001, 0x37cd: 0x0001, 0x37ce: 0x0001, 0x37cf: 0x0001, 0x37d0: 0x0001, 0x37d1: 0x0001, + 0x37d2: 0x0001, 0x37d3: 0x0001, 0x37d4: 0x0001, 0x37d5: 0x0001, 0x37d6: 0x0001, 0x37d7: 0x0001, + 0x37d8: 0x0001, 0x37d9: 0x0001, 0x37da: 0x0001, 0x37db: 0x0001, 0x37dc: 0x0001, 0x37dd: 0x0001, + 0x37de: 0x0001, 0x37df: 0x0001, 0x37e0: 0x0001, 0x37e1: 0x0001, 0x37e2: 0x0001, 0x37e3: 0x0001, + 0x37e4: 0x0001, 0x37e5: 0x0001, 0x37e6: 0x0001, 0x37e7: 0x0001, 0x37e8: 0x0001, 0x37e9: 0x0001, + 0x37ea: 0x0001, 0x37eb: 0x0001, 0x37ec: 0x0001, 0x37ed: 0x0001, 0x37ee: 0x0001, 0x37ef: 0x0001, + 0x37f0: 0x0001, 0x37f1: 0x0001, 0x37f2: 0x0001, 0x37f3: 0x0001, 0x37f4: 0x0001, 0x37f5: 0x0001, + 0x37f6: 0x0001, 0x37f7: 0x0001, 0x37f8: 0x0001, 0x37f9: 0x0001, 0x37fa: 0x0001, 0x37fb: 0x0001, + 0x37fc: 0x0001, 0x37fd: 0x0001, 0x37fe: 0x0001, 0x37ff: 0x0001, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000d, 0x3801: 0x000d, 0x3802: 0x000d, 0x3803: 0x000d, 0x3804: 0x000d, 0x3805: 0x000d, + 0x3806: 0x000d, 0x3807: 0x000d, 0x3808: 0x000d, 0x3809: 0x000d, 0x380a: 0x000d, 0x380b: 0x000d, + 0x380c: 0x000d, 0x380d: 0x000d, 0x380e: 0x000d, 0x380f: 0x000d, 0x3810: 0x0001, 0x3811: 0x0001, + 0x3812: 0x0001, 0x3813: 0x0001, 0x3814: 0x0001, 0x3815: 0x0001, 0x3816: 0x0001, 0x3817: 0x0001, + 0x3818: 0x0001, 0x3819: 0x0001, 0x381a: 0x0001, 0x381b: 0x0001, 0x381c: 0x0001, 0x381d: 0x0001, + 0x381e: 0x0001, 0x381f: 0x0001, 0x3820: 0x0001, 0x3821: 0x0001, 0x3822: 0x0001, 0x3823: 0x0001, + 0x3824: 0x0001, 0x3825: 0x0001, 0x3826: 0x0001, 0x3827: 0x0001, 0x3828: 0x0001, 0x3829: 0x0001, + 0x382a: 0x0001, 0x382b: 0x0001, 0x382c: 0x0001, 0x382d: 0x0001, 0x382e: 0x0001, 0x382f: 0x0001, + 0x3830: 0x0001, 0x3831: 0x0001, 0x3832: 0x0001, 0x3833: 0x0001, 0x3834: 0x0001, 0x3835: 0x0001, + 0x3836: 0x0001, 0x3837: 0x0001, 0x3838: 0x0001, 0x3839: 0x0001, 0x383a: 0x0001, 0x383b: 0x0001, + 0x383c: 0x0001, 0x383d: 0x0001, 0x383e: 0x0001, 0x383f: 0x0001, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000d, 0x3841: 0x000d, 0x3842: 0x000d, 0x3843: 0x000d, 0x3844: 0x000d, 0x3845: 0x000d, + 0x3846: 0x000d, 0x3847: 0x000d, 0x3848: 0x000d, 0x3849: 0x000d, 0x384a: 0x000d, 0x384b: 0x000d, + 0x384c: 0x000d, 0x384d: 0x000d, 0x384e: 0x000d, 0x384f: 0x000d, 0x3850: 0x000d, 0x3851: 0x000d, + 0x3852: 0x000d, 0x3853: 0x000d, 0x3854: 0x000d, 0x3855: 0x000d, 0x3856: 0x000d, 0x3857: 0x000d, + 0x3858: 0x000d, 0x3859: 0x000d, 0x385a: 0x000d, 0x385b: 0x000d, 0x385c: 0x000d, 0x385d: 0x000d, + 0x385e: 0x000d, 0x385f: 0x000d, 0x3860: 0x000d, 0x3861: 0x000d, 0x3862: 0x000d, 0x3863: 0x000d, + 0x3864: 0x000d, 0x3865: 0x000d, 0x3866: 0x000d, 0x3867: 0x000d, 0x3868: 0x000d, 0x3869: 0x000d, + 0x386a: 0x000d, 0x386b: 0x000d, 0x386c: 0x000d, 0x386d: 0x000d, 0x386e: 0x000d, 0x386f: 0x000d, + 0x3870: 0x000a, 0x3871: 0x000a, 0x3872: 0x000d, 0x3873: 0x000d, 0x3874: 0x000d, 0x3875: 0x000d, + 0x3876: 0x000d, 0x3877: 0x000d, 0x3878: 0x000d, 0x3879: 0x000d, 0x387a: 0x000d, 0x387b: 0x000d, + 0x387c: 0x000d, 0x387d: 0x000d, 0x387e: 0x000d, 0x387f: 0x000d, + // Block 0xe2, offset 0x3880 + 
0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x388c: 0x000a, 0x388d: 0x000a, 0x388e: 0x000a, 0x388f: 0x000a, 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 0x38be: 0x000a, 0x38bf: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, + 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, + 0x38f1: 0x000a, 0x38f2: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38f7: 0x000a, 0x38f8: 0x000a, 0x38f9: 0x000a, 0x38fa: 0x000a, 0x38fb: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, 0x3923: 0x000a, + 0x3924: 0x000a, 0x3925: 0x000a, 0x3926: 0x000a, 0x3927: 0x000a, 0x3928: 0x000a, 0x3929: 0x000a, + 0x392a: 0x000a, 0x392b: 0x000a, 0x392c: 0x000a, 0x392d: 0x000a, 0x392e: 0x000a, 0x392f: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x0002, 0x3941: 0x0002, 0x3942: 0x0002, 0x3943: 0x0002, 0x3944: 0x0002, 0x3945: 0x0002, + 0x3946: 0x0002, 0x3947: 0x0002, 0x3948: 0x0002, 0x3949: 0x0002, 0x394a: 0x0002, 0x394b: 0x000a, + 0x394c: 0x000a, 0x394d: 0x000a, 0x394e: 0x000a, 0x394f: 0x000a, + 0x396f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, 0x39ae: 0x000a, 0x39af: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39ed: 0x000a, + // Block 0xe8, offset 0x3a00 + 0x3a20: 0x000a, 0x3a21: 0x000a, 0x3a22: 0x000a, 0x3a23: 0x000a, + 0x3a24: 0x000a, 0x3a25: 0x000a, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000a, 0x3a41: 0x000a, 0x3a42: 0x000a, 0x3a43: 0x000a, 0x3a44: 0x000a, 0x3a45: 0x000a, + 0x3a46: 0x000a, 0x3a47: 0x000a, 0x3a48: 0x000a, 0x3a49: 0x000a, 0x3a4a: 0x000a, 0x3a4b: 
0x000a, + 0x3a4c: 0x000a, 0x3a4d: 0x000a, 0x3a4e: 0x000a, 0x3a4f: 0x000a, 0x3a50: 0x000a, 0x3a51: 0x000a, + 0x3a52: 0x000a, 0x3a53: 0x000a, 0x3a54: 0x000a, 0x3a55: 0x000a, 0x3a56: 0x000a, 0x3a57: 0x000a, + 0x3a60: 0x000a, 0x3a61: 0x000a, 0x3a62: 0x000a, 0x3a63: 0x000a, + 0x3a64: 0x000a, 0x3a65: 0x000a, 0x3a66: 0x000a, 0x3a67: 0x000a, 0x3a68: 0x000a, 0x3a69: 0x000a, + 0x3a6a: 0x000a, 0x3a6b: 0x000a, 0x3a6c: 0x000a, + 0x3a70: 0x000a, 0x3a71: 0x000a, 0x3a72: 0x000a, 0x3a73: 0x000a, 0x3a74: 0x000a, 0x3a75: 0x000a, + 0x3a76: 0x000a, 0x3a77: 0x000a, 0x3a78: 0x000a, 0x3a79: 0x000a, 0x3a7a: 0x000a, 0x3a7b: 0x000a, + 0x3a7c: 0x000a, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x000a, 0x3a81: 0x000a, 0x3a82: 0x000a, 0x3a83: 0x000a, 0x3a84: 0x000a, 0x3a85: 0x000a, + 0x3a86: 0x000a, 0x3a87: 0x000a, 0x3a88: 0x000a, 0x3a89: 0x000a, 0x3a8a: 0x000a, 0x3a8b: 0x000a, + 0x3a8c: 0x000a, 0x3a8d: 0x000a, 0x3a8e: 0x000a, 0x3a8f: 0x000a, 0x3a90: 0x000a, 0x3a91: 0x000a, + 0x3a92: 0x000a, 0x3a93: 0x000a, 0x3a94: 0x000a, 0x3a95: 0x000a, 0x3a96: 0x000a, 0x3a97: 0x000a, + 0x3a98: 0x000a, + 0x3aa0: 0x000a, 0x3aa1: 0x000a, 0x3aa2: 0x000a, 0x3aa3: 0x000a, + 0x3aa4: 0x000a, 0x3aa5: 0x000a, 0x3aa6: 0x000a, 0x3aa7: 0x000a, 0x3aa8: 0x000a, 0x3aa9: 0x000a, + 0x3aaa: 0x000a, 0x3aab: 0x000a, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000a, 0x3ac1: 0x000a, 0x3ac2: 0x000a, 0x3ac3: 0x000a, 0x3ac4: 0x000a, 0x3ac5: 0x000a, + 0x3ac6: 0x000a, 0x3ac7: 0x000a, 0x3ac8: 0x000a, 0x3ac9: 0x000a, 0x3aca: 0x000a, 0x3acb: 0x000a, + 0x3ad0: 0x000a, 0x3ad1: 0x000a, + 0x3ad2: 0x000a, 0x3ad3: 0x000a, 0x3ad4: 0x000a, 0x3ad5: 0x000a, 0x3ad6: 0x000a, 0x3ad7: 0x000a, + 0x3ad8: 0x000a, 0x3ad9: 0x000a, 0x3ada: 0x000a, 0x3adb: 0x000a, 0x3adc: 0x000a, 0x3add: 0x000a, + 0x3ade: 0x000a, 0x3adf: 0x000a, 0x3ae0: 0x000a, 0x3ae1: 0x000a, 0x3ae2: 0x000a, 0x3ae3: 0x000a, + 0x3ae4: 0x000a, 0x3ae5: 0x000a, 0x3ae6: 0x000a, 0x3ae7: 0x000a, 0x3ae8: 0x000a, 0x3ae9: 0x000a, + 0x3aea: 0x000a, 0x3aeb: 0x000a, 0x3aec: 0x000a, 0x3aed: 0x000a, 0x3aee: 0x000a, 0x3aef: 0x000a, + 0x3af0: 0x000a, 0x3af1: 0x000a, 0x3af2: 0x000a, 0x3af3: 0x000a, 0x3af4: 0x000a, 0x3af5: 0x000a, + 0x3af6: 0x000a, 0x3af7: 0x000a, 0x3af8: 0x000a, 0x3af9: 0x000a, 0x3afa: 0x000a, 0x3afb: 0x000a, + 0x3afc: 0x000a, 0x3afd: 0x000a, 0x3afe: 0x000a, 0x3aff: 0x000a, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x000a, 0x3b01: 0x000a, 0x3b02: 0x000a, 0x3b03: 0x000a, 0x3b04: 0x000a, 0x3b05: 0x000a, + 0x3b06: 0x000a, 0x3b07: 0x000a, + 0x3b10: 0x000a, 0x3b11: 0x000a, + 0x3b12: 0x000a, 0x3b13: 0x000a, 0x3b14: 0x000a, 0x3b15: 0x000a, 0x3b16: 0x000a, 0x3b17: 0x000a, + 0x3b18: 0x000a, 0x3b19: 0x000a, + 0x3b20: 0x000a, 0x3b21: 0x000a, 0x3b22: 0x000a, 0x3b23: 0x000a, + 0x3b24: 0x000a, 0x3b25: 0x000a, 0x3b26: 0x000a, 0x3b27: 0x000a, 0x3b28: 0x000a, 0x3b29: 0x000a, + 0x3b2a: 0x000a, 0x3b2b: 0x000a, 0x3b2c: 0x000a, 0x3b2d: 0x000a, 0x3b2e: 0x000a, 0x3b2f: 0x000a, + 0x3b30: 0x000a, 0x3b31: 0x000a, 0x3b32: 0x000a, 0x3b33: 0x000a, 0x3b34: 0x000a, 0x3b35: 0x000a, + 0x3b36: 0x000a, 0x3b37: 0x000a, 0x3b38: 0x000a, 0x3b39: 0x000a, 0x3b3a: 0x000a, 0x3b3b: 0x000a, + 0x3b3c: 0x000a, 0x3b3d: 0x000a, 0x3b3e: 0x000a, 0x3b3f: 0x000a, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x000a, 0x3b41: 0x000a, 0x3b42: 0x000a, 0x3b43: 0x000a, 0x3b44: 0x000a, 0x3b45: 0x000a, + 0x3b46: 0x000a, 0x3b47: 0x000a, + 0x3b50: 0x000a, 0x3b51: 0x000a, + 0x3b52: 0x000a, 0x3b53: 0x000a, 0x3b54: 0x000a, 0x3b55: 0x000a, 0x3b56: 0x000a, 0x3b57: 0x000a, + 0x3b58: 0x000a, 0x3b59: 0x000a, 0x3b5a: 0x000a, 0x3b5b: 0x000a, 0x3b5c: 0x000a, 0x3b5d: 0x000a, + 0x3b5e: 0x000a, 
0x3b5f: 0x000a, 0x3b60: 0x000a, 0x3b61: 0x000a, 0x3b62: 0x000a, 0x3b63: 0x000a, + 0x3b64: 0x000a, 0x3b65: 0x000a, 0x3b66: 0x000a, 0x3b67: 0x000a, 0x3b68: 0x000a, 0x3b69: 0x000a, + 0x3b6a: 0x000a, 0x3b6b: 0x000a, 0x3b6c: 0x000a, 0x3b6d: 0x000a, + 0x3b70: 0x000a, 0x3b71: 0x000a, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000a, 0x3b81: 0x000a, 0x3b82: 0x000a, 0x3b83: 0x000a, 0x3b84: 0x000a, 0x3b85: 0x000a, + 0x3b86: 0x000a, 0x3b87: 0x000a, 0x3b88: 0x000a, 0x3b89: 0x000a, 0x3b8a: 0x000a, 0x3b8b: 0x000a, + 0x3b8c: 0x000a, 0x3b8d: 0x000a, 0x3b8e: 0x000a, 0x3b8f: 0x000a, 0x3b90: 0x000a, 0x3b91: 0x000a, + 0x3b92: 0x000a, 0x3b93: 0x000a, 0x3b94: 0x000a, 0x3b95: 0x000a, 0x3b96: 0x000a, 0x3b97: 0x000a, + 0x3b98: 0x000a, 0x3b99: 0x000a, 0x3b9a: 0x000a, 0x3b9b: 0x000a, 0x3b9c: 0x000a, 0x3b9d: 0x000a, + 0x3b9e: 0x000a, 0x3b9f: 0x000a, 0x3ba0: 0x000a, 0x3ba1: 0x000a, 0x3ba2: 0x000a, 0x3ba3: 0x000a, + 0x3ba4: 0x000a, 0x3ba5: 0x000a, 0x3ba6: 0x000a, 0x3ba7: 0x000a, 0x3ba8: 0x000a, 0x3ba9: 0x000a, + 0x3baa: 0x000a, 0x3bab: 0x000a, 0x3bac: 0x000a, 0x3bad: 0x000a, 0x3bae: 0x000a, 0x3baf: 0x000a, + 0x3bb0: 0x000a, 0x3bb1: 0x000a, 0x3bb2: 0x000a, 0x3bb3: 0x000a, 0x3bb4: 0x000a, 0x3bb5: 0x000a, + 0x3bb6: 0x000a, 0x3bb7: 0x000a, 0x3bb8: 0x000a, 0x3bba: 0x000a, 0x3bbb: 0x000a, + 0x3bbc: 0x000a, 0x3bbd: 0x000a, 0x3bbe: 0x000a, 0x3bbf: 0x000a, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000a, 0x3bc1: 0x000a, 0x3bc2: 0x000a, 0x3bc3: 0x000a, 0x3bc4: 0x000a, 0x3bc5: 0x000a, + 0x3bc6: 0x000a, 0x3bc7: 0x000a, 0x3bc8: 0x000a, 0x3bc9: 0x000a, 0x3bca: 0x000a, 0x3bcb: 0x000a, + 0x3bcd: 0x000a, 0x3bce: 0x000a, 0x3bcf: 0x000a, 0x3bd0: 0x000a, 0x3bd1: 0x000a, + 0x3bd2: 0x000a, 0x3bd3: 0x000a, 0x3bd4: 0x000a, 0x3bd5: 0x000a, 0x3bd6: 0x000a, 0x3bd7: 0x000a, + 0x3bd8: 0x000a, 0x3bd9: 0x000a, 0x3bda: 0x000a, 0x3bdb: 0x000a, 0x3bdc: 0x000a, 0x3bdd: 0x000a, + 0x3bde: 0x000a, 0x3bdf: 0x000a, 0x3be0: 0x000a, 0x3be1: 0x000a, 0x3be2: 0x000a, 0x3be3: 0x000a, + 0x3be4: 0x000a, 0x3be5: 0x000a, 0x3be6: 0x000a, 0x3be7: 0x000a, 0x3be8: 0x000a, 0x3be9: 0x000a, + 0x3bea: 0x000a, 0x3beb: 0x000a, 0x3bec: 0x000a, 0x3bed: 0x000a, 0x3bee: 0x000a, 0x3bef: 0x000a, + 0x3bf0: 0x000a, 0x3bf1: 0x000a, 0x3bf2: 0x000a, 0x3bf3: 0x000a, 0x3bf4: 0x000a, 0x3bf5: 0x000a, + 0x3bf6: 0x000a, 0x3bf7: 0x000a, 0x3bf8: 0x000a, 0x3bf9: 0x000a, 0x3bfa: 0x000a, 0x3bfb: 0x000a, + 0x3bfc: 0x000a, 0x3bfd: 0x000a, 0x3bfe: 0x000a, 0x3bff: 0x000a, + // Block 0xf0, offset 0x3c00 + 0x3c00: 0x000a, 0x3c01: 0x000a, 0x3c02: 0x000a, 0x3c03: 0x000a, 0x3c04: 0x000a, 0x3c05: 0x000a, + 0x3c06: 0x000a, 0x3c07: 0x000a, 0x3c08: 0x000a, 0x3c09: 0x000a, 0x3c0a: 0x000a, 0x3c0b: 0x000a, + 0x3c0c: 0x000a, 0x3c0d: 0x000a, 0x3c0e: 0x000a, 0x3c0f: 0x000a, 0x3c10: 0x000a, 0x3c11: 0x000a, + 0x3c12: 0x000a, 0x3c13: 0x000a, + 0x3c20: 0x000a, 0x3c21: 0x000a, 0x3c22: 0x000a, 0x3c23: 0x000a, + 0x3c24: 0x000a, 0x3c25: 0x000a, 0x3c26: 0x000a, 0x3c27: 0x000a, 0x3c28: 0x000a, 0x3c29: 0x000a, + 0x3c2a: 0x000a, 0x3c2b: 0x000a, 0x3c2c: 0x000a, 0x3c2d: 0x000a, + 0x3c30: 0x000a, 0x3c31: 0x000a, 0x3c32: 0x000a, 0x3c33: 0x000a, 0x3c34: 0x000a, + 0x3c38: 0x000a, 0x3c39: 0x000a, 0x3c3a: 0x000a, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x000a, 0x3c41: 0x000a, 0x3c42: 0x000a, 0x3c43: 0x000a, 0x3c44: 0x000a, 0x3c45: 0x000a, + 0x3c46: 0x000a, + 0x3c50: 0x000a, 0x3c51: 0x000a, + 0x3c52: 0x000a, 0x3c53: 0x000a, 0x3c54: 0x000a, 0x3c55: 0x000a, 0x3c56: 0x000a, 0x3c57: 0x000a, + 0x3c58: 0x000a, 0x3c59: 0x000a, 0x3c5a: 0x000a, 0x3c5b: 0x000a, 0x3c5c: 0x000a, 0x3c5d: 0x000a, + 0x3c5e: 0x000a, 0x3c5f: 0x000a, 0x3c60: 0x000a, 
0x3c61: 0x000a, 0x3c62: 0x000a, 0x3c63: 0x000a, + 0x3c64: 0x000a, 0x3c65: 0x000a, 0x3c66: 0x000a, 0x3c67: 0x000a, 0x3c68: 0x000a, + 0x3c70: 0x000a, 0x3c71: 0x000a, 0x3c72: 0x000a, 0x3c73: 0x000a, 0x3c74: 0x000a, 0x3c75: 0x000a, + 0x3c76: 0x000a, + // Block 0xf2, offset 0x3c80 + 0x3c80: 0x000a, 0x3c81: 0x000a, 0x3c82: 0x000a, + 0x3c90: 0x000a, 0x3c91: 0x000a, + 0x3c92: 0x000a, 0x3c93: 0x000a, 0x3c94: 0x000a, 0x3c95: 0x000a, 0x3c96: 0x000a, + // Block 0xf3, offset 0x3cc0 + 0x3cc0: 0x000a, 0x3cc1: 0x000a, 0x3cc2: 0x000a, 0x3cc3: 0x000a, 0x3cc4: 0x000a, 0x3cc5: 0x000a, + 0x3cc6: 0x000a, 0x3cc7: 0x000a, 0x3cc8: 0x000a, 0x3cc9: 0x000a, 0x3cca: 0x000a, 0x3ccb: 0x000a, + 0x3ccc: 0x000a, 0x3ccd: 0x000a, 0x3cce: 0x000a, 0x3ccf: 0x000a, 0x3cd0: 0x000a, 0x3cd1: 0x000a, + 0x3cd2: 0x000a, 0x3cd4: 0x000a, 0x3cd5: 0x000a, 0x3cd6: 0x000a, 0x3cd7: 0x000a, + 0x3cd8: 0x000a, 0x3cd9: 0x000a, 0x3cda: 0x000a, 0x3cdb: 0x000a, 0x3cdc: 0x000a, 0x3cdd: 0x000a, + 0x3cde: 0x000a, 0x3cdf: 0x000a, 0x3ce0: 0x000a, 0x3ce1: 0x000a, 0x3ce2: 0x000a, 0x3ce3: 0x000a, + 0x3ce4: 0x000a, 0x3ce5: 0x000a, 0x3ce6: 0x000a, 0x3ce7: 0x000a, 0x3ce8: 0x000a, 0x3ce9: 0x000a, + 0x3cea: 0x000a, 0x3ceb: 0x000a, 0x3cec: 0x000a, 0x3ced: 0x000a, 0x3cee: 0x000a, 0x3cef: 0x000a, + 0x3cf0: 0x000a, 0x3cf1: 0x000a, 0x3cf2: 0x000a, 0x3cf3: 0x000a, 0x3cf4: 0x000a, 0x3cf5: 0x000a, + 0x3cf6: 0x000a, 0x3cf7: 0x000a, 0x3cf8: 0x000a, 0x3cf9: 0x000a, 0x3cfa: 0x000a, 0x3cfb: 0x000a, + 0x3cfc: 0x000a, 0x3cfd: 0x000a, 0x3cfe: 0x000a, 0x3cff: 0x000a, + // Block 0xf4, offset 0x3d00 + 0x3d00: 0x000a, 0x3d01: 0x000a, 0x3d02: 0x000a, 0x3d03: 0x000a, 0x3d04: 0x000a, 0x3d05: 0x000a, + 0x3d06: 0x000a, 0x3d07: 0x000a, 0x3d08: 0x000a, 0x3d09: 0x000a, 0x3d0a: 0x000a, + 0x3d30: 0x0002, 0x3d31: 0x0002, 0x3d32: 0x0002, 0x3d33: 0x0002, 0x3d34: 0x0002, 0x3d35: 0x0002, + 0x3d36: 0x0002, 0x3d37: 0x0002, 0x3d38: 0x0002, 0x3d39: 0x0002, + // Block 0xf5, offset 0x3d40 + 0x3d7e: 0x000b, 0x3d7f: 0x000b, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x000b, 0x3d81: 0x000b, 0x3d82: 0x000b, 0x3d83: 0x000b, 0x3d84: 0x000b, 0x3d85: 0x000b, + 0x3d86: 0x000b, 0x3d87: 0x000b, 0x3d88: 0x000b, 0x3d89: 0x000b, 0x3d8a: 0x000b, 0x3d8b: 0x000b, + 0x3d8c: 0x000b, 0x3d8d: 0x000b, 0x3d8e: 0x000b, 0x3d8f: 0x000b, 0x3d90: 0x000b, 0x3d91: 0x000b, + 0x3d92: 0x000b, 0x3d93: 0x000b, 0x3d94: 0x000b, 0x3d95: 0x000b, 0x3d96: 0x000b, 0x3d97: 0x000b, + 0x3d98: 0x000b, 0x3d99: 0x000b, 0x3d9a: 0x000b, 0x3d9b: 0x000b, 0x3d9c: 0x000b, 0x3d9d: 0x000b, + 0x3d9e: 0x000b, 0x3d9f: 0x000b, 0x3da0: 0x000b, 0x3da1: 0x000b, 0x3da2: 0x000b, 0x3da3: 0x000b, + 0x3da4: 0x000b, 0x3da5: 0x000b, 0x3da6: 0x000b, 0x3da7: 0x000b, 0x3da8: 0x000b, 0x3da9: 0x000b, + 0x3daa: 0x000b, 0x3dab: 0x000b, 0x3dac: 0x000b, 0x3dad: 0x000b, 0x3dae: 0x000b, 0x3daf: 0x000b, + 0x3db0: 0x000b, 0x3db1: 0x000b, 0x3db2: 0x000b, 0x3db3: 0x000b, 0x3db4: 0x000b, 0x3db5: 0x000b, + 0x3db6: 0x000b, 0x3db7: 0x000b, 0x3db8: 0x000b, 0x3db9: 0x000b, 0x3dba: 0x000b, 0x3dbb: 0x000b, + 0x3dbc: 0x000b, 0x3dbd: 0x000b, 0x3dbe: 0x000b, 0x3dbf: 0x000b, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x000c, 0x3dc1: 0x000c, 0x3dc2: 0x000c, 0x3dc3: 0x000c, 0x3dc4: 0x000c, 0x3dc5: 0x000c, + 0x3dc6: 0x000c, 0x3dc7: 0x000c, 0x3dc8: 0x000c, 0x3dc9: 0x000c, 0x3dca: 0x000c, 0x3dcb: 0x000c, + 0x3dcc: 0x000c, 0x3dcd: 0x000c, 0x3dce: 0x000c, 0x3dcf: 0x000c, 0x3dd0: 0x000c, 0x3dd1: 0x000c, + 0x3dd2: 0x000c, 0x3dd3: 0x000c, 0x3dd4: 0x000c, 0x3dd5: 0x000c, 0x3dd6: 0x000c, 0x3dd7: 0x000c, + 0x3dd8: 0x000c, 0x3dd9: 0x000c, 0x3dda: 0x000c, 0x3ddb: 0x000c, 0x3ddc: 0x000c, 0x3ddd: 0x000c, + 0x3dde: 
0x000c, 0x3ddf: 0x000c, 0x3de0: 0x000c, 0x3de1: 0x000c, 0x3de2: 0x000c, 0x3de3: 0x000c, + 0x3de4: 0x000c, 0x3de5: 0x000c, 0x3de6: 0x000c, 0x3de7: 0x000c, 0x3de8: 0x000c, 0x3de9: 0x000c, + 0x3dea: 0x000c, 0x3deb: 0x000c, 0x3dec: 0x000c, 0x3ded: 0x000c, 0x3dee: 0x000c, 0x3def: 0x000c, + 0x3df0: 0x000b, 0x3df1: 0x000b, 0x3df2: 0x000b, 0x3df3: 0x000b, 0x3df4: 0x000b, 0x3df5: 0x000b, + 0x3df6: 0x000b, 0x3df7: 0x000b, 0x3df8: 0x000b, 0x3df9: 0x000b, 0x3dfa: 0x000b, 0x3dfb: 0x000b, + 0x3dfc: 0x000b, 0x3dfd: 0x000b, 0x3dfe: 0x000b, 0x3dff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. +var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x136: 0x28, 0x137: 0x29, + 0x138: 0x2a, 0x139: 0x2b, 0x13a: 0x2c, 0x13b: 0x2d, 0x13c: 0x2e, 0x13d: 0x2f, 0x13e: 0x30, 0x13f: 0x31, + // Block 0x5, offset 0x140 + 0x140: 0x32, 0x141: 0x33, 0x142: 0x34, + 0x14d: 0x35, 0x14e: 0x36, + 0x150: 0x37, + 0x15a: 0x38, 0x15c: 0x39, 0x15d: 0x3a, 0x15e: 0x3b, 0x15f: 0x3c, + 0x160: 0x3d, 0x162: 0x3e, 0x164: 0x3f, 0x165: 0x40, 0x167: 0x41, + 0x168: 0x42, 0x169: 0x43, 0x16a: 0x44, 0x16b: 0x45, 0x16c: 0x46, 0x16d: 0x47, 0x16e: 0x48, 0x16f: 0x49, + 0x170: 0x4a, 0x173: 0x4b, 0x177: 0x4c, + 0x17e: 0x4d, 0x17f: 0x4e, + // Block 0x6, offset 0x180 + 0x180: 0x4f, 0x181: 0x50, 0x182: 0x51, 0x183: 0x52, 0x184: 0x53, 0x185: 0x54, 0x186: 0x55, 0x187: 0x56, + 0x188: 0x57, 0x189: 0x56, 0x18a: 0x56, 0x18b: 0x56, 0x18c: 0x58, 0x18d: 0x59, 0x18e: 0x5a, 0x18f: 0x56, + 0x190: 0x5b, 0x191: 0x5c, 0x192: 0x5d, 0x193: 0x5e, 0x194: 0x56, 0x195: 0x56, 0x196: 0x56, 0x197: 0x56, + 0x198: 0x56, 0x199: 0x56, 0x19a: 0x5f, 0x19b: 0x56, 0x19c: 0x56, 0x19d: 0x60, 0x19e: 0x56, 0x19f: 0x61, + 0x1a4: 0x56, 0x1a5: 0x56, 0x1a6: 0x62, 0x1a7: 0x63, + 0x1a8: 0x56, 0x1a9: 0x56, 0x1aa: 0x56, 0x1ab: 0x56, 0x1ac: 0x56, 0x1ad: 0x64, 0x1ae: 0x65, 0x1af: 0x56, + 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, + 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x56, 0x1bd: 0x56, 0x1be: 0x56, 0x1bf: 0x6d, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, + 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, + // Block 0x8, offset 0x200 + 0x237: 0x56, + // Block 0x9, offset 0x240 + 0x252: 0x78, 0x253: 0x79, + 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, + 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26d: 0x8b, 0x26f: 0x8c, + // Block 0xa, offset 0x280 + 0x2ac: 0x8d, 0x2ad: 0x8e, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8f, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x90, + 0x2b8: 0x91, 0x2b9: 0x92, 0x2ba: 0x0e, 0x2bb: 0x93, 
0x2bc: 0x94, 0x2bd: 0x95, 0x2bf: 0x96, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x97, 0x2c5: 0x56, 0x2c6: 0x98, 0x2c7: 0x99, + 0x2cb: 0x9a, 0x2cd: 0x9b, + 0x2e0: 0x9c, 0x2e1: 0x9c, 0x2e2: 0x9c, 0x2e3: 0x9c, 0x2e4: 0x9d, 0x2e5: 0x9c, 0x2e6: 0x9c, 0x2e7: 0x9c, + 0x2e8: 0x9e, 0x2e9: 0x9c, 0x2ea: 0x9c, 0x2eb: 0x9f, 0x2ec: 0xa0, 0x2ed: 0x9c, 0x2ee: 0x9c, 0x2ef: 0x9c, + 0x2f0: 0x9c, 0x2f1: 0x9c, 0x2f2: 0x9c, 0x2f3: 0x9c, 0x2f4: 0xa1, 0x2f5: 0x9c, 0x2f6: 0x9c, 0x2f7: 0x9c, + 0x2f8: 0x9c, 0x2f9: 0xa2, 0x2fa: 0xa3, 0x2fb: 0x9c, 0x2fc: 0xa4, 0x2fd: 0xa5, 0x2fe: 0x9c, 0x2ff: 0x9c, + // Block 0xc, offset 0x300 + 0x300: 0xa6, 0x301: 0xa7, 0x302: 0xa8, 0x304: 0xa9, 0x305: 0xaa, 0x306: 0xab, 0x307: 0xac, + 0x308: 0xad, 0x30b: 0xae, 0x30c: 0x26, 0x30d: 0xaf, + 0x310: 0xb0, 0x311: 0xb1, 0x312: 0xb2, 0x313: 0xb3, 0x316: 0xb4, 0x317: 0xb5, + 0x318: 0xb6, 0x319: 0xb7, 0x31a: 0xb8, 0x31c: 0xb9, + 0x320: 0xba, 0x324: 0xbb, 0x325: 0xbc, 0x327: 0xbd, + 0x328: 0xbe, 0x329: 0xbf, 0x32a: 0xc0, + 0x330: 0xc1, 0x332: 0xc2, 0x334: 0xc3, 0x335: 0xc4, 0x336: 0xc5, + 0x33b: 0xc6, 0x33f: 0xc7, + // Block 0xd, offset 0x340 + 0x36b: 0xc8, 0x36c: 0xc9, + 0x37d: 0xca, 0x37e: 0xcb, 0x37f: 0xcc, + // Block 0xe, offset 0x380 + 0x3b2: 0xcd, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xce, 0x3c6: 0xcf, + 0x3c8: 0x56, 0x3c9: 0xd0, 0x3cc: 0x56, 0x3cd: 0xd1, + 0x3db: 0xd2, 0x3dc: 0xd3, 0x3dd: 0xd4, 0x3de: 0xd5, 0x3df: 0xd6, + 0x3e8: 0xd7, 0x3e9: 0xd8, 0x3ea: 0xd9, + // Block 0x10, offset 0x400 + 0x400: 0xda, 0x404: 0xc9, + 0x40b: 0xdb, + 0x420: 0x9c, 0x421: 0x9c, 0x422: 0x9c, 0x423: 0xdc, 0x424: 0x9c, 0x425: 0xdd, 0x426: 0x9c, 0x427: 0x9c, + 0x428: 0x9c, 0x429: 0x9c, 0x42a: 0x9c, 0x42b: 0x9c, 0x42c: 0x9c, 0x42d: 0x9c, 0x42e: 0x9c, 0x42f: 0x9c, + 0x430: 0x9c, 0x431: 0xa4, 0x432: 0x0e, 0x433: 0x9c, 0x434: 0x0e, 0x435: 0xde, 0x436: 0x9c, 0x437: 0x9c, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xdf, 0x43c: 0x9c, 0x43d: 0x9c, 0x43e: 0x9c, 0x43f: 0x9c, + // Block 0x11, offset 0x440 + 0x440: 0xe0, 0x441: 0x56, 0x442: 0xe1, 0x443: 0xe2, 0x444: 0xe3, 0x445: 0xe4, 0x446: 0xe5, + 0x449: 0xe6, 0x44c: 0x56, 0x44d: 0x56, 0x44e: 0x56, 0x44f: 0x56, + 0x450: 0x56, 0x451: 0x56, 0x452: 0x56, 0x453: 0x56, 0x454: 0x56, 0x455: 0x56, 0x456: 0x56, 0x457: 0x56, + 0x458: 0x56, 0x459: 0x56, 0x45a: 0x56, 0x45b: 0xe7, 0x45c: 0x56, 0x45d: 0x6c, 0x45e: 0x56, 0x45f: 0xe8, + 0x460: 0xe9, 0x461: 0xea, 0x462: 0xeb, 0x464: 0x56, 0x465: 0xec, 0x466: 0x56, 0x467: 0xed, + 0x468: 0x56, 0x469: 0xee, 0x46a: 0xef, 0x46b: 0xf0, 0x46c: 0x56, 0x46d: 0x56, 0x46e: 0xf1, 0x46f: 0xf2, + 0x47f: 0xf3, + // Block 0x12, offset 0x480 + 0x4bf: 0xf3, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xf4, 0x541: 0xf4, 0x542: 0xf4, 0x543: 0xf4, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xf5, + 0x548: 0xf4, 0x549: 0xf4, 0x54a: 0xf4, 0x54b: 0xf4, 0x54c: 0xf4, 0x54d: 0xf4, 0x54e: 0xf4, 0x54f: 0xf4, + 0x550: 0xf4, 0x551: 0xf4, 0x552: 0xf4, 0x553: 0xf4, 0x554: 0xf4, 0x555: 0xf4, 0x556: 0xf4, 0x557: 0xf4, + 0x558: 0xf4, 0x559: 0xf4, 0x55a: 0xf4, 0x55b: 0xf4, 0x55c: 0xf4, 0x55d: 0xf4, 0x55e: 0xf4, 0x55f: 0xf4, + 0x560: 0xf4, 0x561: 0xf4, 0x562: 0xf4, 0x563: 0xf4, 0x564: 0xf4, 0x565: 0xf4, 0x566: 0xf4, 0x567: 0xf4, + 0x568: 0xf4, 0x569: 0xf4, 0x56a: 0xf4, 0x56b: 0xf4, 0x56c: 0xf4, 0x56d: 0xf4, 0x56e: 0xf4, 0x56f: 0xf4, + 0x570: 0xf4, 0x571: 0xf4, 0x572: 0xf4, 0x573: 
0xf4, 0x574: 0xf4, 0x575: 0xf4, 0x576: 0xf4, 0x577: 0xf4, + 0x578: 0xf4, 0x579: 0xf4, 0x57a: 0xf4, 0x57b: 0xf4, 0x57c: 0xf4, 0x57d: 0xf4, 0x57e: 0xf4, 0x57f: 0xf4, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 17464 bytes (17KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 10f5202c693fd..7e1ae096e5c00 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go new file mode 100644 index 0000000000000..9ea1b421407d5 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -0,0 +1,7760 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.16 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "13.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [56]uint8{ + 0, 1, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 84, 91, 103, 107, 118, 122, 129, + 130, 132, 202, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x1870 + firstCCC = 0x2CAB + endMulti = 0x2F77 + firstLeadingCCC = 0x49C5 + firstCCCZeroExcept = 0x4A8F + firstStarterWithNLead = 0x4AB6 + lastDecomp = 0x4AB8 + maxDecomp = 0x8000 +) + +// decomps: 19128 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 
0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x8D, 0x42, 0xCA, 0x90, 0x42, 0xCA, 0x91, 0x42, + 0xCA, 0x92, 0x42, 0xCA, 0x95, 0x42, 0xCA, 0x9D, + 0x42, 0xCA, 0x9F, 0x42, 0xCA, 0xB9, 0x42, 0xCE, + 0x91, 0x42, 0xCE, 0x92, 0x42, 0xCE, 0x93, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x94, 0x42, 0xCE, 0x95, 0x42, 0xCE, 0x96, + 0x42, 0xCE, 0x97, 0x42, 0xCE, 0x98, 0x42, 0xCE, + 0x99, 0x42, 0xCE, 0x9A, 0x42, 0xCE, 0x9B, 0x42, + 0xCE, 0x9C, 0x42, 0xCE, 0x9D, 0x42, 0xCE, 0x9E, + 0x42, 0xCE, 0x9F, 0x42, 0xCE, 0xA0, 0x42, 0xCE, + 0xA1, 0x42, 0xCE, 0xA3, 0x42, 0xCE, 0xA4, 0x42, + 0xCE, 0xA5, 0x42, 0xCE, 0xA6, 0x42, 0xCE, 0xA7, + 0x42, 0xCE, 0xA8, 0x42, 0xCE, 0xA9, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 0xB1, 0x42, 0xCE, 0xB2, 0x42, 0xCE, 0xB3, 0x42, + 0xCE, 0xB4, 0x42, 0xCE, 0xB5, 0x42, 0xCE, 0xB6, + 0x42, 0xCE, 0xB7, 0x42, 0xCE, 0xB8, 0x42, 0xCE, + 0xB9, 0x42, 0xCE, 0xBA, 0x42, 0xCE, 0xBB, 0x42, + 0xCE, 0xBC, 0x42, 0xCE, 0xBD, 0x42, 0xCE, 0xBE, + 0x42, 0xCE, 0xBF, 0x42, 0xCF, 0x80, 0x42, 0xCF, + 0x81, 0x42, 0xCF, 0x82, 0x42, 0xCF, 0x83, 0x42, + 0xCF, 0x84, 0x42, 0xCF, 0x85, 0x42, 0xCF, 0x86, + // Bytes 200 - 23f + 0x42, 0xCF, 0x87, 0x42, 0xCF, 0x88, 0x42, 0xCF, + 0x89, 0x42, 0xCF, 0x9C, 0x42, 0xCF, 0x9D, 0x42, + 0xD0, 0xBD, 0x42, 0xD1, 0x8A, 0x42, 0xD1, 0x8C, + 0x42, 0xD7, 0x90, 0x42, 0xD7, 0x91, 0x42, 0xD7, + 0x92, 0x42, 0xD7, 0x93, 0x42, 0xD7, 0x94, 0x42, + 0xD7, 0x9B, 0x42, 0xD7, 0x9C, 0x42, 0xD7, 0x9D, + 0x42, 0xD7, 0xA2, 0x42, 0xD7, 0xA8, 0x42, 0xD7, + 0xAA, 0x42, 0xD8, 0xA1, 0x42, 0xD8, 0xA7, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA8, 0x42, 0xD8, 0xA9, 0x42, 0xD8, 0xAA, + 0x42, 0xD8, 0xAB, 0x42, 0xD8, 0xAC, 0x42, 0xD8, + 0xAD, 0x42, 0xD8, 0xAE, 0x42, 0xD8, 0xAF, 0x42, + 0xD8, 0xB0, 0x42, 0xD8, 0xB1, 0x42, 0xD8, 0xB2, + 0x42, 0xD8, 0xB3, 0x42, 0xD8, 0xB4, 0x42, 0xD8, + 0xB5, 0x42, 0xD8, 0xB6, 0x42, 0xD8, 0xB7, 0x42, + 0xD8, 0xB8, 0x42, 0xD8, 0xB9, 0x42, 0xD8, 0xBA, + 0x42, 0xD9, 0x81, 0x42, 0xD9, 0x82, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x83, 0x42, 0xD9, 0x84, 0x42, 0xD9, 0x85, 0x42, + 0xD9, 0x86, 0x42, 0xD9, 0x87, 0x42, 0xD9, 0x88, + 0x42, 0xD9, 0x89, 0x42, 0xD9, 0x8A, 0x42, 0xD9, + 0xAE, 0x42, 0xD9, 0xAF, 0x42, 0xD9, 0xB1, 0x42, + 0xD9, 0xB9, 0x42, 0xD9, 0xBA, 0x42, 0xD9, 0xBB, + 0x42, 0xD9, 0xBE, 0x42, 0xD9, 0xBF, 0x42, 0xDA, + 0x80, 0x42, 0xDA, 0x83, 0x42, 0xDA, 0x84, 0x42, + 0xDA, 0x86, 0x42, 0xDA, 0x87, 0x42, 0xDA, 0x88, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8C, 0x42, 0xDA, 0x8D, 0x42, 0xDA, + 0x8E, 0x42, 0xDA, 0x91, 0x42, 0xDA, 0x98, 0x42, + 0xDA, 0xA1, 0x42, 0xDA, 0xA4, 0x42, 0xDA, 0xA6, + 0x42, 0xDA, 0xA9, 0x42, 0xDA, 0xAD, 0x42, 0xDA, + 0xAF, 0x42, 0xDA, 0xB1, 0x42, 0xDA, 0xB3, 
0x42, + 0xDA, 0xBA, 0x42, 0xDA, 0xBB, 0x42, 0xDA, 0xBE, + 0x42, 0xDB, 0x81, 0x42, 0xDB, 0x85, 0x42, 0xDB, + 0x86, 0x42, 0xDB, 0x87, 0x42, 0xDB, 0x88, 0x42, + // Bytes 300 - 33f + 0xDB, 0x89, 0x42, 0xDB, 0x8B, 0x42, 0xDB, 0x8C, + 0x42, 0xDB, 0x90, 0x42, 0xDB, 0x92, 0x43, 0xE0, + 0xBC, 0x8B, 0x43, 0xE1, 0x83, 0x9C, 0x43, 0xE1, + 0x84, 0x80, 0x43, 0xE1, 0x84, 0x81, 0x43, 0xE1, + 0x84, 0x82, 0x43, 0xE1, 0x84, 0x83, 0x43, 0xE1, + 0x84, 0x84, 0x43, 0xE1, 0x84, 0x85, 0x43, 0xE1, + 0x84, 0x86, 0x43, 0xE1, 0x84, 0x87, 0x43, 0xE1, + 0x84, 0x88, 0x43, 0xE1, 0x84, 0x89, 0x43, 0xE1, + // Bytes 340 - 37f + 0x84, 0x8A, 0x43, 0xE1, 0x84, 0x8B, 0x43, 0xE1, + 0x84, 0x8C, 0x43, 0xE1, 0x84, 0x8D, 0x43, 0xE1, + 0x84, 0x8E, 0x43, 0xE1, 0x84, 0x8F, 0x43, 0xE1, + 0x84, 0x90, 0x43, 0xE1, 0x84, 0x91, 0x43, 0xE1, + 0x84, 0x92, 0x43, 0xE1, 0x84, 0x94, 0x43, 0xE1, + 0x84, 0x95, 0x43, 0xE1, 0x84, 0x9A, 0x43, 0xE1, + 0x84, 0x9C, 0x43, 0xE1, 0x84, 0x9D, 0x43, 0xE1, + 0x84, 0x9E, 0x43, 0xE1, 0x84, 0xA0, 0x43, 0xE1, + // Bytes 380 - 3bf + 0x84, 0xA1, 0x43, 0xE1, 0x84, 0xA2, 0x43, 0xE1, + 0x84, 0xA3, 0x43, 0xE1, 0x84, 0xA7, 0x43, 0xE1, + 0x84, 0xA9, 0x43, 0xE1, 0x84, 0xAB, 0x43, 0xE1, + 0x84, 0xAC, 0x43, 0xE1, 0x84, 0xAD, 0x43, 0xE1, + 0x84, 0xAE, 0x43, 0xE1, 0x84, 0xAF, 0x43, 0xE1, + 0x84, 0xB2, 0x43, 0xE1, 0x84, 0xB6, 0x43, 0xE1, + 0x85, 0x80, 0x43, 0xE1, 0x85, 0x87, 0x43, 0xE1, + 0x85, 0x8C, 0x43, 0xE1, 0x85, 0x97, 0x43, 0xE1, + // Bytes 3c0 - 3ff + 0x85, 0x98, 0x43, 0xE1, 0x85, 0x99, 0x43, 0xE1, + 0x85, 0xA0, 0x43, 0xE1, 0x86, 0x84, 0x43, 0xE1, + 0x86, 0x85, 0x43, 0xE1, 0x86, 0x88, 0x43, 0xE1, + 0x86, 0x91, 0x43, 0xE1, 0x86, 0x92, 0x43, 0xE1, + 0x86, 0x94, 0x43, 0xE1, 0x86, 0x9E, 0x43, 0xE1, + 0x86, 0xA1, 0x43, 0xE1, 0x87, 0x87, 0x43, 0xE1, + 0x87, 0x88, 0x43, 0xE1, 0x87, 0x8C, 0x43, 0xE1, + 0x87, 0x8E, 0x43, 0xE1, 0x87, 0x93, 0x43, 0xE1, + // Bytes 400 - 43f + 0x87, 0x97, 0x43, 0xE1, 0x87, 0x99, 0x43, 0xE1, + 0x87, 0x9D, 0x43, 0xE1, 0x87, 0x9F, 0x43, 0xE1, + 0x87, 0xB1, 0x43, 0xE1, 0x87, 0xB2, 0x43, 0xE1, + 0xB4, 0x82, 0x43, 0xE1, 0xB4, 0x96, 0x43, 0xE1, + 0xB4, 0x97, 0x43, 0xE1, 0xB4, 0x9C, 0x43, 0xE1, + 0xB4, 0x9D, 0x43, 0xE1, 0xB4, 0xA5, 0x43, 0xE1, + 0xB5, 0xBB, 0x43, 0xE1, 0xB6, 0x85, 0x43, 0xE2, + 0x80, 0x82, 0x43, 0xE2, 0x80, 0x83, 0x43, 0xE2, + // Bytes 440 - 47f + 0x80, 0x90, 0x43, 0xE2, 0x80, 0x93, 0x43, 0xE2, + 0x80, 0x94, 0x43, 0xE2, 0x82, 0xA9, 0x43, 0xE2, + 0x86, 0x90, 0x43, 0xE2, 0x86, 0x91, 0x43, 0xE2, + 0x86, 0x92, 0x43, 0xE2, 0x86, 0x93, 0x43, 0xE2, + 0x88, 0x82, 0x43, 0xE2, 0x88, 0x87, 0x43, 0xE2, + 0x88, 0x91, 0x43, 0xE2, 0x88, 0x92, 0x43, 0xE2, + 0x94, 0x82, 0x43, 0xE2, 0x96, 0xA0, 0x43, 0xE2, + 0x97, 0x8B, 0x43, 0xE2, 0xA6, 0x85, 0x43, 0xE2, + // Bytes 480 - 4bf + 0xA6, 0x86, 0x43, 0xE2, 0xB5, 0xA1, 0x43, 0xE3, + 0x80, 0x81, 0x43, 0xE3, 0x80, 0x82, 0x43, 0xE3, + 0x80, 0x88, 0x43, 0xE3, 0x80, 0x89, 0x43, 0xE3, + 0x80, 0x8A, 0x43, 0xE3, 0x80, 0x8B, 0x43, 0xE3, + 0x80, 0x8C, 0x43, 0xE3, 0x80, 0x8D, 0x43, 0xE3, + 0x80, 0x8E, 0x43, 0xE3, 0x80, 0x8F, 0x43, 0xE3, + 0x80, 0x90, 0x43, 0xE3, 0x80, 0x91, 0x43, 0xE3, + 0x80, 0x92, 0x43, 0xE3, 0x80, 0x94, 0x43, 0xE3, + // Bytes 4c0 - 4ff + 0x80, 0x95, 0x43, 0xE3, 0x80, 0x96, 0x43, 0xE3, + 0x80, 0x97, 0x43, 0xE3, 0x82, 0xA1, 0x43, 0xE3, + 0x82, 0xA2, 0x43, 0xE3, 0x82, 0xA3, 0x43, 0xE3, + 0x82, 0xA4, 0x43, 0xE3, 0x82, 0xA5, 0x43, 0xE3, + 0x82, 0xA6, 0x43, 0xE3, 0x82, 0xA7, 0x43, 0xE3, + 0x82, 0xA8, 0x43, 0xE3, 0x82, 0xA9, 0x43, 0xE3, + 0x82, 0xAA, 0x43, 0xE3, 0x82, 0xAB, 0x43, 0xE3, + 0x82, 0xAD, 0x43, 0xE3, 0x82, 0xAF, 0x43, 0xE3, + // Bytes 500 - 53f + 0x82, 
0xB1, 0x43, 0xE3, 0x82, 0xB3, 0x43, 0xE3, + 0x82, 0xB5, 0x43, 0xE3, 0x82, 0xB7, 0x43, 0xE3, + 0x82, 0xB9, 0x43, 0xE3, 0x82, 0xBB, 0x43, 0xE3, + 0x82, 0xBD, 0x43, 0xE3, 0x82, 0xBF, 0x43, 0xE3, + 0x83, 0x81, 0x43, 0xE3, 0x83, 0x83, 0x43, 0xE3, + 0x83, 0x84, 0x43, 0xE3, 0x83, 0x86, 0x43, 0xE3, + 0x83, 0x88, 0x43, 0xE3, 0x83, 0x8A, 0x43, 0xE3, + 0x83, 0x8B, 0x43, 0xE3, 0x83, 0x8C, 0x43, 0xE3, + // Bytes 540 - 57f + 0x83, 0x8D, 0x43, 0xE3, 0x83, 0x8E, 0x43, 0xE3, + 0x83, 0x8F, 0x43, 0xE3, 0x83, 0x92, 0x43, 0xE3, + 0x83, 0x95, 0x43, 0xE3, 0x83, 0x98, 0x43, 0xE3, + 0x83, 0x9B, 0x43, 0xE3, 0x83, 0x9E, 0x43, 0xE3, + 0x83, 0x9F, 0x43, 0xE3, 0x83, 0xA0, 0x43, 0xE3, + 0x83, 0xA1, 0x43, 0xE3, 0x83, 0xA2, 0x43, 0xE3, + 0x83, 0xA3, 0x43, 0xE3, 0x83, 0xA4, 0x43, 0xE3, + 0x83, 0xA5, 0x43, 0xE3, 0x83, 0xA6, 0x43, 0xE3, + // Bytes 580 - 5bf + 0x83, 0xA7, 0x43, 0xE3, 0x83, 0xA8, 0x43, 0xE3, + 0x83, 0xA9, 0x43, 0xE3, 0x83, 0xAA, 0x43, 0xE3, + 0x83, 0xAB, 0x43, 0xE3, 0x83, 0xAC, 0x43, 0xE3, + 0x83, 0xAD, 0x43, 0xE3, 0x83, 0xAF, 0x43, 0xE3, + 0x83, 0xB0, 0x43, 0xE3, 0x83, 0xB1, 0x43, 0xE3, + 0x83, 0xB2, 0x43, 0xE3, 0x83, 0xB3, 0x43, 0xE3, + 0x83, 0xBB, 0x43, 0xE3, 0x83, 0xBC, 0x43, 0xE3, + 0x92, 0x9E, 0x43, 0xE3, 0x92, 0xB9, 0x43, 0xE3, + // Bytes 5c0 - 5ff + 0x92, 0xBB, 0x43, 0xE3, 0x93, 0x9F, 0x43, 0xE3, + 0x94, 0x95, 0x43, 0xE3, 0x9B, 0xAE, 0x43, 0xE3, + 0x9B, 0xBC, 0x43, 0xE3, 0x9E, 0x81, 0x43, 0xE3, + 0xA0, 0xAF, 0x43, 0xE3, 0xA1, 0xA2, 0x43, 0xE3, + 0xA1, 0xBC, 0x43, 0xE3, 0xA3, 0x87, 0x43, 0xE3, + 0xA3, 0xA3, 0x43, 0xE3, 0xA4, 0x9C, 0x43, 0xE3, + 0xA4, 0xBA, 0x43, 0xE3, 0xA8, 0xAE, 0x43, 0xE3, + 0xA9, 0xAC, 0x43, 0xE3, 0xAB, 0xA4, 0x43, 0xE3, + // Bytes 600 - 63f + 0xAC, 0x88, 0x43, 0xE3, 0xAC, 0x99, 0x43, 0xE3, + 0xAD, 0x89, 0x43, 0xE3, 0xAE, 0x9D, 0x43, 0xE3, + 0xB0, 0x98, 0x43, 0xE3, 0xB1, 0x8E, 0x43, 0xE3, + 0xB4, 0xB3, 0x43, 0xE3, 0xB6, 0x96, 0x43, 0xE3, + 0xBA, 0xAC, 0x43, 0xE3, 0xBA, 0xB8, 0x43, 0xE3, + 0xBC, 0x9B, 0x43, 0xE3, 0xBF, 0xBC, 0x43, 0xE4, + 0x80, 0x88, 0x43, 0xE4, 0x80, 0x98, 0x43, 0xE4, + 0x80, 0xB9, 0x43, 0xE4, 0x81, 0x86, 0x43, 0xE4, + // Bytes 640 - 67f + 0x82, 0x96, 0x43, 0xE4, 0x83, 0xA3, 0x43, 0xE4, + 0x84, 0xAF, 0x43, 0xE4, 0x88, 0x82, 0x43, 0xE4, + 0x88, 0xA7, 0x43, 0xE4, 0x8A, 0xA0, 0x43, 0xE4, + 0x8C, 0x81, 0x43, 0xE4, 0x8C, 0xB4, 0x43, 0xE4, + 0x8D, 0x99, 0x43, 0xE4, 0x8F, 0x95, 0x43, 0xE4, + 0x8F, 0x99, 0x43, 0xE4, 0x90, 0x8B, 0x43, 0xE4, + 0x91, 0xAB, 0x43, 0xE4, 0x94, 0xAB, 0x43, 0xE4, + 0x95, 0x9D, 0x43, 0xE4, 0x95, 0xA1, 0x43, 0xE4, + // Bytes 680 - 6bf + 0x95, 0xAB, 0x43, 0xE4, 0x97, 0x97, 0x43, 0xE4, + 0x97, 0xB9, 0x43, 0xE4, 0x98, 0xB5, 0x43, 0xE4, + 0x9A, 0xBE, 0x43, 0xE4, 0x9B, 0x87, 0x43, 0xE4, + 0xA6, 0x95, 0x43, 0xE4, 0xA7, 0xA6, 0x43, 0xE4, + 0xA9, 0xAE, 0x43, 0xE4, 0xA9, 0xB6, 0x43, 0xE4, + 0xAA, 0xB2, 0x43, 0xE4, 0xAC, 0xB3, 0x43, 0xE4, + 0xAF, 0x8E, 0x43, 0xE4, 0xB3, 0x8E, 0x43, 0xE4, + 0xB3, 0xAD, 0x43, 0xE4, 0xB3, 0xB8, 0x43, 0xE4, + // Bytes 6c0 - 6ff + 0xB5, 0x96, 0x43, 0xE4, 0xB8, 0x80, 0x43, 0xE4, + 0xB8, 0x81, 0x43, 0xE4, 0xB8, 0x83, 0x43, 0xE4, + 0xB8, 0x89, 0x43, 0xE4, 0xB8, 0x8A, 0x43, 0xE4, + 0xB8, 0x8B, 0x43, 0xE4, 0xB8, 0x8D, 0x43, 0xE4, + 0xB8, 0x99, 0x43, 0xE4, 0xB8, 0xA6, 0x43, 0xE4, + 0xB8, 0xA8, 0x43, 0xE4, 0xB8, 0xAD, 0x43, 0xE4, + 0xB8, 0xB2, 0x43, 0xE4, 0xB8, 0xB6, 0x43, 0xE4, + 0xB8, 0xB8, 0x43, 0xE4, 0xB8, 0xB9, 0x43, 0xE4, + // Bytes 700 - 73f + 0xB8, 0xBD, 0x43, 0xE4, 0xB8, 0xBF, 0x43, 0xE4, + 0xB9, 0x81, 0x43, 0xE4, 0xB9, 0x99, 0x43, 0xE4, + 0xB9, 0x9D, 0x43, 0xE4, 0xBA, 0x82, 0x43, 0xE4, + 0xBA, 0x85, 0x43, 0xE4, 0xBA, 0x86, 0x43, 
0xE4, + 0xBA, 0x8C, 0x43, 0xE4, 0xBA, 0x94, 0x43, 0xE4, + 0xBA, 0xA0, 0x43, 0xE4, 0xBA, 0xA4, 0x43, 0xE4, + 0xBA, 0xAE, 0x43, 0xE4, 0xBA, 0xBA, 0x43, 0xE4, + 0xBB, 0x80, 0x43, 0xE4, 0xBB, 0x8C, 0x43, 0xE4, + // Bytes 740 - 77f + 0xBB, 0xA4, 0x43, 0xE4, 0xBC, 0x81, 0x43, 0xE4, + 0xBC, 0x91, 0x43, 0xE4, 0xBD, 0xA0, 0x43, 0xE4, + 0xBE, 0x80, 0x43, 0xE4, 0xBE, 0x86, 0x43, 0xE4, + 0xBE, 0x8B, 0x43, 0xE4, 0xBE, 0xAE, 0x43, 0xE4, + 0xBE, 0xBB, 0x43, 0xE4, 0xBE, 0xBF, 0x43, 0xE5, + 0x80, 0x82, 0x43, 0xE5, 0x80, 0xAB, 0x43, 0xE5, + 0x81, 0xBA, 0x43, 0xE5, 0x82, 0x99, 0x43, 0xE5, + 0x83, 0x8F, 0x43, 0xE5, 0x83, 0x9A, 0x43, 0xE5, + // Bytes 780 - 7bf + 0x83, 0xA7, 0x43, 0xE5, 0x84, 0xAA, 0x43, 0xE5, + 0x84, 0xBF, 0x43, 0xE5, 0x85, 0x80, 0x43, 0xE5, + 0x85, 0x85, 0x43, 0xE5, 0x85, 0x8D, 0x43, 0xE5, + 0x85, 0x94, 0x43, 0xE5, 0x85, 0xA4, 0x43, 0xE5, + 0x85, 0xA5, 0x43, 0xE5, 0x85, 0xA7, 0x43, 0xE5, + 0x85, 0xA8, 0x43, 0xE5, 0x85, 0xA9, 0x43, 0xE5, + 0x85, 0xAB, 0x43, 0xE5, 0x85, 0xAD, 0x43, 0xE5, + 0x85, 0xB7, 0x43, 0xE5, 0x86, 0x80, 0x43, 0xE5, + // Bytes 7c0 - 7ff + 0x86, 0x82, 0x43, 0xE5, 0x86, 0x8D, 0x43, 0xE5, + 0x86, 0x92, 0x43, 0xE5, 0x86, 0x95, 0x43, 0xE5, + 0x86, 0x96, 0x43, 0xE5, 0x86, 0x97, 0x43, 0xE5, + 0x86, 0x99, 0x43, 0xE5, 0x86, 0xA4, 0x43, 0xE5, + 0x86, 0xAB, 0x43, 0xE5, 0x86, 0xAC, 0x43, 0xE5, + 0x86, 0xB5, 0x43, 0xE5, 0x86, 0xB7, 0x43, 0xE5, + 0x87, 0x89, 0x43, 0xE5, 0x87, 0x8C, 0x43, 0xE5, + 0x87, 0x9C, 0x43, 0xE5, 0x87, 0x9E, 0x43, 0xE5, + // Bytes 800 - 83f + 0x87, 0xA0, 0x43, 0xE5, 0x87, 0xB5, 0x43, 0xE5, + 0x88, 0x80, 0x43, 0xE5, 0x88, 0x83, 0x43, 0xE5, + 0x88, 0x87, 0x43, 0xE5, 0x88, 0x97, 0x43, 0xE5, + 0x88, 0x9D, 0x43, 0xE5, 0x88, 0xA9, 0x43, 0xE5, + 0x88, 0xBA, 0x43, 0xE5, 0x88, 0xBB, 0x43, 0xE5, + 0x89, 0x86, 0x43, 0xE5, 0x89, 0x8D, 0x43, 0xE5, + 0x89, 0xB2, 0x43, 0xE5, 0x89, 0xB7, 0x43, 0xE5, + 0x8A, 0x89, 0x43, 0xE5, 0x8A, 0x9B, 0x43, 0xE5, + // Bytes 840 - 87f + 0x8A, 0xA3, 0x43, 0xE5, 0x8A, 0xB3, 0x43, 0xE5, + 0x8A, 0xB4, 0x43, 0xE5, 0x8B, 0x87, 0x43, 0xE5, + 0x8B, 0x89, 0x43, 0xE5, 0x8B, 0x92, 0x43, 0xE5, + 0x8B, 0x9E, 0x43, 0xE5, 0x8B, 0xA4, 0x43, 0xE5, + 0x8B, 0xB5, 0x43, 0xE5, 0x8B, 0xB9, 0x43, 0xE5, + 0x8B, 0xBA, 0x43, 0xE5, 0x8C, 0x85, 0x43, 0xE5, + 0x8C, 0x86, 0x43, 0xE5, 0x8C, 0x95, 0x43, 0xE5, + 0x8C, 0x97, 0x43, 0xE5, 0x8C, 0x9A, 0x43, 0xE5, + // Bytes 880 - 8bf + 0x8C, 0xB8, 0x43, 0xE5, 0x8C, 0xBB, 0x43, 0xE5, + 0x8C, 0xBF, 0x43, 0xE5, 0x8D, 0x81, 0x43, 0xE5, + 0x8D, 0x84, 0x43, 0xE5, 0x8D, 0x85, 0x43, 0xE5, + 0x8D, 0x89, 0x43, 0xE5, 0x8D, 0x91, 0x43, 0xE5, + 0x8D, 0x94, 0x43, 0xE5, 0x8D, 0x9A, 0x43, 0xE5, + 0x8D, 0x9C, 0x43, 0xE5, 0x8D, 0xA9, 0x43, 0xE5, + 0x8D, 0xB0, 0x43, 0xE5, 0x8D, 0xB3, 0x43, 0xE5, + 0x8D, 0xB5, 0x43, 0xE5, 0x8D, 0xBD, 0x43, 0xE5, + // Bytes 8c0 - 8ff + 0x8D, 0xBF, 0x43, 0xE5, 0x8E, 0x82, 0x43, 0xE5, + 0x8E, 0xB6, 0x43, 0xE5, 0x8F, 0x83, 0x43, 0xE5, + 0x8F, 0x88, 0x43, 0xE5, 0x8F, 0x8A, 0x43, 0xE5, + 0x8F, 0x8C, 0x43, 0xE5, 0x8F, 0x9F, 0x43, 0xE5, + 0x8F, 0xA3, 0x43, 0xE5, 0x8F, 0xA5, 0x43, 0xE5, + 0x8F, 0xAB, 0x43, 0xE5, 0x8F, 0xAF, 0x43, 0xE5, + 0x8F, 0xB1, 0x43, 0xE5, 0x8F, 0xB3, 0x43, 0xE5, + 0x90, 0x86, 0x43, 0xE5, 0x90, 0x88, 0x43, 0xE5, + // Bytes 900 - 93f + 0x90, 0x8D, 0x43, 0xE5, 0x90, 0x8F, 0x43, 0xE5, + 0x90, 0x9D, 0x43, 0xE5, 0x90, 0xB8, 0x43, 0xE5, + 0x90, 0xB9, 0x43, 0xE5, 0x91, 0x82, 0x43, 0xE5, + 0x91, 0x88, 0x43, 0xE5, 0x91, 0xA8, 0x43, 0xE5, + 0x92, 0x9E, 0x43, 0xE5, 0x92, 0xA2, 0x43, 0xE5, + 0x92, 0xBD, 0x43, 0xE5, 0x93, 0xB6, 0x43, 0xE5, + 0x94, 0x90, 0x43, 0xE5, 0x95, 0x8F, 0x43, 0xE5, + 0x95, 0x93, 0x43, 0xE5, 
0x95, 0x95, 0x43, 0xE5, + // Bytes 940 - 97f + 0x95, 0xA3, 0x43, 0xE5, 0x96, 0x84, 0x43, 0xE5, + 0x96, 0x87, 0x43, 0xE5, 0x96, 0x99, 0x43, 0xE5, + 0x96, 0x9D, 0x43, 0xE5, 0x96, 0xAB, 0x43, 0xE5, + 0x96, 0xB3, 0x43, 0xE5, 0x96, 0xB6, 0x43, 0xE5, + 0x97, 0x80, 0x43, 0xE5, 0x97, 0x82, 0x43, 0xE5, + 0x97, 0xA2, 0x43, 0xE5, 0x98, 0x86, 0x43, 0xE5, + 0x99, 0x91, 0x43, 0xE5, 0x99, 0xA8, 0x43, 0xE5, + 0x99, 0xB4, 0x43, 0xE5, 0x9B, 0x97, 0x43, 0xE5, + // Bytes 980 - 9bf + 0x9B, 0x9B, 0x43, 0xE5, 0x9B, 0xB9, 0x43, 0xE5, + 0x9C, 0x96, 0x43, 0xE5, 0x9C, 0x97, 0x43, 0xE5, + 0x9C, 0x9F, 0x43, 0xE5, 0x9C, 0xB0, 0x43, 0xE5, + 0x9E, 0x8B, 0x43, 0xE5, 0x9F, 0x8E, 0x43, 0xE5, + 0x9F, 0xB4, 0x43, 0xE5, 0xA0, 0x8D, 0x43, 0xE5, + 0xA0, 0xB1, 0x43, 0xE5, 0xA0, 0xB2, 0x43, 0xE5, + 0xA1, 0x80, 0x43, 0xE5, 0xA1, 0x9A, 0x43, 0xE5, + 0xA1, 0x9E, 0x43, 0xE5, 0xA2, 0xA8, 0x43, 0xE5, + // Bytes 9c0 - 9ff + 0xA2, 0xAC, 0x43, 0xE5, 0xA2, 0xB3, 0x43, 0xE5, + 0xA3, 0x98, 0x43, 0xE5, 0xA3, 0x9F, 0x43, 0xE5, + 0xA3, 0xAB, 0x43, 0xE5, 0xA3, 0xAE, 0x43, 0xE5, + 0xA3, 0xB0, 0x43, 0xE5, 0xA3, 0xB2, 0x43, 0xE5, + 0xA3, 0xB7, 0x43, 0xE5, 0xA4, 0x82, 0x43, 0xE5, + 0xA4, 0x86, 0x43, 0xE5, 0xA4, 0x8A, 0x43, 0xE5, + 0xA4, 0x95, 0x43, 0xE5, 0xA4, 0x9A, 0x43, 0xE5, + 0xA4, 0x9C, 0x43, 0xE5, 0xA4, 0xA2, 0x43, 0xE5, + // Bytes a00 - a3f + 0xA4, 0xA7, 0x43, 0xE5, 0xA4, 0xA9, 0x43, 0xE5, + 0xA5, 0x84, 0x43, 0xE5, 0xA5, 0x88, 0x43, 0xE5, + 0xA5, 0x91, 0x43, 0xE5, 0xA5, 0x94, 0x43, 0xE5, + 0xA5, 0xA2, 0x43, 0xE5, 0xA5, 0xB3, 0x43, 0xE5, + 0xA7, 0x98, 0x43, 0xE5, 0xA7, 0xAC, 0x43, 0xE5, + 0xA8, 0x9B, 0x43, 0xE5, 0xA8, 0xA7, 0x43, 0xE5, + 0xA9, 0xA2, 0x43, 0xE5, 0xA9, 0xA6, 0x43, 0xE5, + 0xAA, 0xB5, 0x43, 0xE5, 0xAC, 0x88, 0x43, 0xE5, + // Bytes a40 - a7f + 0xAC, 0xA8, 0x43, 0xE5, 0xAC, 0xBE, 0x43, 0xE5, + 0xAD, 0x90, 0x43, 0xE5, 0xAD, 0x97, 0x43, 0xE5, + 0xAD, 0xA6, 0x43, 0xE5, 0xAE, 0x80, 0x43, 0xE5, + 0xAE, 0x85, 0x43, 0xE5, 0xAE, 0x97, 0x43, 0xE5, + 0xAF, 0x83, 0x43, 0xE5, 0xAF, 0x98, 0x43, 0xE5, + 0xAF, 0xA7, 0x43, 0xE5, 0xAF, 0xAE, 0x43, 0xE5, + 0xAF, 0xB3, 0x43, 0xE5, 0xAF, 0xB8, 0x43, 0xE5, + 0xAF, 0xBF, 0x43, 0xE5, 0xB0, 0x86, 0x43, 0xE5, + // Bytes a80 - abf + 0xB0, 0x8F, 0x43, 0xE5, 0xB0, 0xA2, 0x43, 0xE5, + 0xB0, 0xB8, 0x43, 0xE5, 0xB0, 0xBF, 0x43, 0xE5, + 0xB1, 0xA0, 0x43, 0xE5, 0xB1, 0xA2, 0x43, 0xE5, + 0xB1, 0xA4, 0x43, 0xE5, 0xB1, 0xA5, 0x43, 0xE5, + 0xB1, 0xAE, 0x43, 0xE5, 0xB1, 0xB1, 0x43, 0xE5, + 0xB2, 0x8D, 0x43, 0xE5, 0xB3, 0x80, 0x43, 0xE5, + 0xB4, 0x99, 0x43, 0xE5, 0xB5, 0x83, 0x43, 0xE5, + 0xB5, 0x90, 0x43, 0xE5, 0xB5, 0xAB, 0x43, 0xE5, + // Bytes ac0 - aff + 0xB5, 0xAE, 0x43, 0xE5, 0xB5, 0xBC, 0x43, 0xE5, + 0xB6, 0xB2, 0x43, 0xE5, 0xB6, 0xBA, 0x43, 0xE5, + 0xB7, 0x9B, 0x43, 0xE5, 0xB7, 0xA1, 0x43, 0xE5, + 0xB7, 0xA2, 0x43, 0xE5, 0xB7, 0xA5, 0x43, 0xE5, + 0xB7, 0xA6, 0x43, 0xE5, 0xB7, 0xB1, 0x43, 0xE5, + 0xB7, 0xBD, 0x43, 0xE5, 0xB7, 0xBE, 0x43, 0xE5, + 0xB8, 0xA8, 0x43, 0xE5, 0xB8, 0xBD, 0x43, 0xE5, + 0xB9, 0xA9, 0x43, 0xE5, 0xB9, 0xB2, 0x43, 0xE5, + // Bytes b00 - b3f + 0xB9, 0xB4, 0x43, 0xE5, 0xB9, 0xBA, 0x43, 0xE5, + 0xB9, 0xBC, 0x43, 0xE5, 0xB9, 0xBF, 0x43, 0xE5, + 0xBA, 0xA6, 0x43, 0xE5, 0xBA, 0xB0, 0x43, 0xE5, + 0xBA, 0xB3, 0x43, 0xE5, 0xBA, 0xB6, 0x43, 0xE5, + 0xBB, 0x89, 0x43, 0xE5, 0xBB, 0x8A, 0x43, 0xE5, + 0xBB, 0x92, 0x43, 0xE5, 0xBB, 0x93, 0x43, 0xE5, + 0xBB, 0x99, 0x43, 0xE5, 0xBB, 0xAC, 0x43, 0xE5, + 0xBB, 0xB4, 0x43, 0xE5, 0xBB, 0xBE, 0x43, 0xE5, + // Bytes b40 - b7f + 0xBC, 0x84, 0x43, 0xE5, 0xBC, 0x8B, 0x43, 0xE5, + 0xBC, 0x93, 0x43, 0xE5, 0xBC, 0xA2, 0x43, 0xE5, + 0xBD, 0x90, 0x43, 0xE5, 0xBD, 0x93, 
0x43, 0xE5, + 0xBD, 0xA1, 0x43, 0xE5, 0xBD, 0xA2, 0x43, 0xE5, + 0xBD, 0xA9, 0x43, 0xE5, 0xBD, 0xAB, 0x43, 0xE5, + 0xBD, 0xB3, 0x43, 0xE5, 0xBE, 0x8B, 0x43, 0xE5, + 0xBE, 0x8C, 0x43, 0xE5, 0xBE, 0x97, 0x43, 0xE5, + 0xBE, 0x9A, 0x43, 0xE5, 0xBE, 0xA9, 0x43, 0xE5, + // Bytes b80 - bbf + 0xBE, 0xAD, 0x43, 0xE5, 0xBF, 0x83, 0x43, 0xE5, + 0xBF, 0x8D, 0x43, 0xE5, 0xBF, 0x97, 0x43, 0xE5, + 0xBF, 0xB5, 0x43, 0xE5, 0xBF, 0xB9, 0x43, 0xE6, + 0x80, 0x92, 0x43, 0xE6, 0x80, 0x9C, 0x43, 0xE6, + 0x81, 0xB5, 0x43, 0xE6, 0x82, 0x81, 0x43, 0xE6, + 0x82, 0x94, 0x43, 0xE6, 0x83, 0x87, 0x43, 0xE6, + 0x83, 0x98, 0x43, 0xE6, 0x83, 0xA1, 0x43, 0xE6, + 0x84, 0x88, 0x43, 0xE6, 0x85, 0x84, 0x43, 0xE6, + // Bytes bc0 - bff + 0x85, 0x88, 0x43, 0xE6, 0x85, 0x8C, 0x43, 0xE6, + 0x85, 0x8E, 0x43, 0xE6, 0x85, 0xA0, 0x43, 0xE6, + 0x85, 0xA8, 0x43, 0xE6, 0x85, 0xBA, 0x43, 0xE6, + 0x86, 0x8E, 0x43, 0xE6, 0x86, 0x90, 0x43, 0xE6, + 0x86, 0xA4, 0x43, 0xE6, 0x86, 0xAF, 0x43, 0xE6, + 0x86, 0xB2, 0x43, 0xE6, 0x87, 0x9E, 0x43, 0xE6, + 0x87, 0xB2, 0x43, 0xE6, 0x87, 0xB6, 0x43, 0xE6, + 0x88, 0x80, 0x43, 0xE6, 0x88, 0x88, 0x43, 0xE6, + // Bytes c00 - c3f + 0x88, 0x90, 0x43, 0xE6, 0x88, 0x9B, 0x43, 0xE6, + 0x88, 0xAE, 0x43, 0xE6, 0x88, 0xB4, 0x43, 0xE6, + 0x88, 0xB6, 0x43, 0xE6, 0x89, 0x8B, 0x43, 0xE6, + 0x89, 0x93, 0x43, 0xE6, 0x89, 0x9D, 0x43, 0xE6, + 0x8A, 0x95, 0x43, 0xE6, 0x8A, 0xB1, 0x43, 0xE6, + 0x8B, 0x89, 0x43, 0xE6, 0x8B, 0x8F, 0x43, 0xE6, + 0x8B, 0x93, 0x43, 0xE6, 0x8B, 0x94, 0x43, 0xE6, + 0x8B, 0xBC, 0x43, 0xE6, 0x8B, 0xBE, 0x43, 0xE6, + // Bytes c40 - c7f + 0x8C, 0x87, 0x43, 0xE6, 0x8C, 0xBD, 0x43, 0xE6, + 0x8D, 0x90, 0x43, 0xE6, 0x8D, 0x95, 0x43, 0xE6, + 0x8D, 0xA8, 0x43, 0xE6, 0x8D, 0xBB, 0x43, 0xE6, + 0x8E, 0x83, 0x43, 0xE6, 0x8E, 0xA0, 0x43, 0xE6, + 0x8E, 0xA9, 0x43, 0xE6, 0x8F, 0x84, 0x43, 0xE6, + 0x8F, 0x85, 0x43, 0xE6, 0x8F, 0xA4, 0x43, 0xE6, + 0x90, 0x9C, 0x43, 0xE6, 0x90, 0xA2, 0x43, 0xE6, + 0x91, 0x92, 0x43, 0xE6, 0x91, 0xA9, 0x43, 0xE6, + // Bytes c80 - cbf + 0x91, 0xB7, 0x43, 0xE6, 0x91, 0xBE, 0x43, 0xE6, + 0x92, 0x9A, 0x43, 0xE6, 0x92, 0x9D, 0x43, 0xE6, + 0x93, 0x84, 0x43, 0xE6, 0x94, 0xAF, 0x43, 0xE6, + 0x94, 0xB4, 0x43, 0xE6, 0x95, 0x8F, 0x43, 0xE6, + 0x95, 0x96, 0x43, 0xE6, 0x95, 0xAC, 0x43, 0xE6, + 0x95, 0xB8, 0x43, 0xE6, 0x96, 0x87, 0x43, 0xE6, + 0x96, 0x97, 0x43, 0xE6, 0x96, 0x99, 0x43, 0xE6, + 0x96, 0xA4, 0x43, 0xE6, 0x96, 0xB0, 0x43, 0xE6, + // Bytes cc0 - cff + 0x96, 0xB9, 0x43, 0xE6, 0x97, 0x85, 0x43, 0xE6, + 0x97, 0xA0, 0x43, 0xE6, 0x97, 0xA2, 0x43, 0xE6, + 0x97, 0xA3, 0x43, 0xE6, 0x97, 0xA5, 0x43, 0xE6, + 0x98, 0x93, 0x43, 0xE6, 0x98, 0xA0, 0x43, 0xE6, + 0x99, 0x89, 0x43, 0xE6, 0x99, 0xB4, 0x43, 0xE6, + 0x9A, 0x88, 0x43, 0xE6, 0x9A, 0x91, 0x43, 0xE6, + 0x9A, 0x9C, 0x43, 0xE6, 0x9A, 0xB4, 0x43, 0xE6, + 0x9B, 0x86, 0x43, 0xE6, 0x9B, 0xB0, 0x43, 0xE6, + // Bytes d00 - d3f + 0x9B, 0xB4, 0x43, 0xE6, 0x9B, 0xB8, 0x43, 0xE6, + 0x9C, 0x80, 0x43, 0xE6, 0x9C, 0x88, 0x43, 0xE6, + 0x9C, 0x89, 0x43, 0xE6, 0x9C, 0x97, 0x43, 0xE6, + 0x9C, 0x9B, 0x43, 0xE6, 0x9C, 0xA1, 0x43, 0xE6, + 0x9C, 0xA8, 0x43, 0xE6, 0x9D, 0x8E, 0x43, 0xE6, + 0x9D, 0x93, 0x43, 0xE6, 0x9D, 0x96, 0x43, 0xE6, + 0x9D, 0x9E, 0x43, 0xE6, 0x9D, 0xBB, 0x43, 0xE6, + 0x9E, 0x85, 0x43, 0xE6, 0x9E, 0x97, 0x43, 0xE6, + // Bytes d40 - d7f + 0x9F, 0xB3, 0x43, 0xE6, 0x9F, 0xBA, 0x43, 0xE6, + 0xA0, 0x97, 0x43, 0xE6, 0xA0, 0x9F, 0x43, 0xE6, + 0xA0, 0xAA, 0x43, 0xE6, 0xA1, 0x92, 0x43, 0xE6, + 0xA2, 0x81, 0x43, 0xE6, 0xA2, 0x85, 0x43, 0xE6, + 0xA2, 0x8E, 0x43, 0xE6, 0xA2, 0xA8, 0x43, 0xE6, + 0xA4, 0x94, 0x43, 0xE6, 0xA5, 0x82, 0x43, 0xE6, + 0xA6, 0xA3, 0x43, 
0xE6, 0xA7, 0xAA, 0x43, 0xE6, + 0xA8, 0x82, 0x43, 0xE6, 0xA8, 0x93, 0x43, 0xE6, + // Bytes d80 - dbf + 0xAA, 0xA8, 0x43, 0xE6, 0xAB, 0x93, 0x43, 0xE6, + 0xAB, 0x9B, 0x43, 0xE6, 0xAC, 0x84, 0x43, 0xE6, + 0xAC, 0xA0, 0x43, 0xE6, 0xAC, 0xA1, 0x43, 0xE6, + 0xAD, 0x94, 0x43, 0xE6, 0xAD, 0xA2, 0x43, 0xE6, + 0xAD, 0xA3, 0x43, 0xE6, 0xAD, 0xB2, 0x43, 0xE6, + 0xAD, 0xB7, 0x43, 0xE6, 0xAD, 0xB9, 0x43, 0xE6, + 0xAE, 0x9F, 0x43, 0xE6, 0xAE, 0xAE, 0x43, 0xE6, + 0xAE, 0xB3, 0x43, 0xE6, 0xAE, 0xBA, 0x43, 0xE6, + // Bytes dc0 - dff + 0xAE, 0xBB, 0x43, 0xE6, 0xAF, 0x8B, 0x43, 0xE6, + 0xAF, 0x8D, 0x43, 0xE6, 0xAF, 0x94, 0x43, 0xE6, + 0xAF, 0x9B, 0x43, 0xE6, 0xB0, 0x8F, 0x43, 0xE6, + 0xB0, 0x94, 0x43, 0xE6, 0xB0, 0xB4, 0x43, 0xE6, + 0xB1, 0x8E, 0x43, 0xE6, 0xB1, 0xA7, 0x43, 0xE6, + 0xB2, 0x88, 0x43, 0xE6, 0xB2, 0xBF, 0x43, 0xE6, + 0xB3, 0x8C, 0x43, 0xE6, 0xB3, 0x8D, 0x43, 0xE6, + 0xB3, 0xA5, 0x43, 0xE6, 0xB3, 0xA8, 0x43, 0xE6, + // Bytes e00 - e3f + 0xB4, 0x96, 0x43, 0xE6, 0xB4, 0x9B, 0x43, 0xE6, + 0xB4, 0x9E, 0x43, 0xE6, 0xB4, 0xB4, 0x43, 0xE6, + 0xB4, 0xBE, 0x43, 0xE6, 0xB5, 0x81, 0x43, 0xE6, + 0xB5, 0xA9, 0x43, 0xE6, 0xB5, 0xAA, 0x43, 0xE6, + 0xB5, 0xB7, 0x43, 0xE6, 0xB5, 0xB8, 0x43, 0xE6, + 0xB6, 0x85, 0x43, 0xE6, 0xB7, 0x8B, 0x43, 0xE6, + 0xB7, 0x9A, 0x43, 0xE6, 0xB7, 0xAA, 0x43, 0xE6, + 0xB7, 0xB9, 0x43, 0xE6, 0xB8, 0x9A, 0x43, 0xE6, + // Bytes e40 - e7f + 0xB8, 0xAF, 0x43, 0xE6, 0xB9, 0xAE, 0x43, 0xE6, + 0xBA, 0x80, 0x43, 0xE6, 0xBA, 0x9C, 0x43, 0xE6, + 0xBA, 0xBA, 0x43, 0xE6, 0xBB, 0x87, 0x43, 0xE6, + 0xBB, 0x8B, 0x43, 0xE6, 0xBB, 0x91, 0x43, 0xE6, + 0xBB, 0x9B, 0x43, 0xE6, 0xBC, 0x8F, 0x43, 0xE6, + 0xBC, 0x94, 0x43, 0xE6, 0xBC, 0xA2, 0x43, 0xE6, + 0xBC, 0xA3, 0x43, 0xE6, 0xBD, 0xAE, 0x43, 0xE6, + 0xBF, 0x86, 0x43, 0xE6, 0xBF, 0xAB, 0x43, 0xE6, + // Bytes e80 - ebf + 0xBF, 0xBE, 0x43, 0xE7, 0x80, 0x9B, 0x43, 0xE7, + 0x80, 0x9E, 0x43, 0xE7, 0x80, 0xB9, 0x43, 0xE7, + 0x81, 0x8A, 0x43, 0xE7, 0x81, 0xAB, 0x43, 0xE7, + 0x81, 0xB0, 0x43, 0xE7, 0x81, 0xB7, 0x43, 0xE7, + 0x81, 0xBD, 0x43, 0xE7, 0x82, 0x99, 0x43, 0xE7, + 0x82, 0xAD, 0x43, 0xE7, 0x83, 0x88, 0x43, 0xE7, + 0x83, 0x99, 0x43, 0xE7, 0x84, 0xA1, 0x43, 0xE7, + 0x85, 0x85, 0x43, 0xE7, 0x85, 0x89, 0x43, 0xE7, + // Bytes ec0 - eff + 0x85, 0xAE, 0x43, 0xE7, 0x86, 0x9C, 0x43, 0xE7, + 0x87, 0x8E, 0x43, 0xE7, 0x87, 0x90, 0x43, 0xE7, + 0x88, 0x90, 0x43, 0xE7, 0x88, 0x9B, 0x43, 0xE7, + 0x88, 0xA8, 0x43, 0xE7, 0x88, 0xAA, 0x43, 0xE7, + 0x88, 0xAB, 0x43, 0xE7, 0x88, 0xB5, 0x43, 0xE7, + 0x88, 0xB6, 0x43, 0xE7, 0x88, 0xBB, 0x43, 0xE7, + 0x88, 0xBF, 0x43, 0xE7, 0x89, 0x87, 0x43, 0xE7, + 0x89, 0x90, 0x43, 0xE7, 0x89, 0x99, 0x43, 0xE7, + // Bytes f00 - f3f + 0x89, 0x9B, 0x43, 0xE7, 0x89, 0xA2, 0x43, 0xE7, + 0x89, 0xB9, 0x43, 0xE7, 0x8A, 0x80, 0x43, 0xE7, + 0x8A, 0x95, 0x43, 0xE7, 0x8A, 0xAC, 0x43, 0xE7, + 0x8A, 0xAF, 0x43, 0xE7, 0x8B, 0x80, 0x43, 0xE7, + 0x8B, 0xBC, 0x43, 0xE7, 0x8C, 0xAA, 0x43, 0xE7, + 0x8D, 0xB5, 0x43, 0xE7, 0x8D, 0xBA, 0x43, 0xE7, + 0x8E, 0x84, 0x43, 0xE7, 0x8E, 0x87, 0x43, 0xE7, + 0x8E, 0x89, 0x43, 0xE7, 0x8E, 0x8B, 0x43, 0xE7, + // Bytes f40 - f7f + 0x8E, 0xA5, 0x43, 0xE7, 0x8E, 0xB2, 0x43, 0xE7, + 0x8F, 0x9E, 0x43, 0xE7, 0x90, 0x86, 0x43, 0xE7, + 0x90, 0x89, 0x43, 0xE7, 0x90, 0xA2, 0x43, 0xE7, + 0x91, 0x87, 0x43, 0xE7, 0x91, 0x9C, 0x43, 0xE7, + 0x91, 0xA9, 0x43, 0xE7, 0x91, 0xB1, 0x43, 0xE7, + 0x92, 0x85, 0x43, 0xE7, 0x92, 0x89, 0x43, 0xE7, + 0x92, 0x98, 0x43, 0xE7, 0x93, 0x8A, 0x43, 0xE7, + 0x93, 0x9C, 0x43, 0xE7, 0x93, 0xA6, 0x43, 0xE7, + // Bytes f80 - fbf + 0x94, 0x86, 0x43, 0xE7, 0x94, 0x98, 0x43, 0xE7, + 0x94, 0x9F, 0x43, 0xE7, 0x94, 
0xA4, 0x43, 0xE7, + 0x94, 0xA8, 0x43, 0xE7, 0x94, 0xB0, 0x43, 0xE7, + 0x94, 0xB2, 0x43, 0xE7, 0x94, 0xB3, 0x43, 0xE7, + 0x94, 0xB7, 0x43, 0xE7, 0x94, 0xBB, 0x43, 0xE7, + 0x94, 0xBE, 0x43, 0xE7, 0x95, 0x99, 0x43, 0xE7, + 0x95, 0xA5, 0x43, 0xE7, 0x95, 0xB0, 0x43, 0xE7, + 0x96, 0x8B, 0x43, 0xE7, 0x96, 0x92, 0x43, 0xE7, + // Bytes fc0 - fff + 0x97, 0xA2, 0x43, 0xE7, 0x98, 0x90, 0x43, 0xE7, + 0x98, 0x9D, 0x43, 0xE7, 0x98, 0x9F, 0x43, 0xE7, + 0x99, 0x82, 0x43, 0xE7, 0x99, 0xA9, 0x43, 0xE7, + 0x99, 0xB6, 0x43, 0xE7, 0x99, 0xBD, 0x43, 0xE7, + 0x9A, 0xAE, 0x43, 0xE7, 0x9A, 0xBF, 0x43, 0xE7, + 0x9B, 0x8A, 0x43, 0xE7, 0x9B, 0x9B, 0x43, 0xE7, + 0x9B, 0xA3, 0x43, 0xE7, 0x9B, 0xA7, 0x43, 0xE7, + 0x9B, 0xAE, 0x43, 0xE7, 0x9B, 0xB4, 0x43, 0xE7, + // Bytes 1000 - 103f + 0x9C, 0x81, 0x43, 0xE7, 0x9C, 0x9E, 0x43, 0xE7, + 0x9C, 0x9F, 0x43, 0xE7, 0x9D, 0x80, 0x43, 0xE7, + 0x9D, 0x8A, 0x43, 0xE7, 0x9E, 0x8B, 0x43, 0xE7, + 0x9E, 0xA7, 0x43, 0xE7, 0x9F, 0x9B, 0x43, 0xE7, + 0x9F, 0xA2, 0x43, 0xE7, 0x9F, 0xB3, 0x43, 0xE7, + 0xA1, 0x8E, 0x43, 0xE7, 0xA1, 0xAB, 0x43, 0xE7, + 0xA2, 0x8C, 0x43, 0xE7, 0xA2, 0x91, 0x43, 0xE7, + 0xA3, 0x8A, 0x43, 0xE7, 0xA3, 0x8C, 0x43, 0xE7, + // Bytes 1040 - 107f + 0xA3, 0xBB, 0x43, 0xE7, 0xA4, 0xAA, 0x43, 0xE7, + 0xA4, 0xBA, 0x43, 0xE7, 0xA4, 0xBC, 0x43, 0xE7, + 0xA4, 0xBE, 0x43, 0xE7, 0xA5, 0x88, 0x43, 0xE7, + 0xA5, 0x89, 0x43, 0xE7, 0xA5, 0x90, 0x43, 0xE7, + 0xA5, 0x96, 0x43, 0xE7, 0xA5, 0x9D, 0x43, 0xE7, + 0xA5, 0x9E, 0x43, 0xE7, 0xA5, 0xA5, 0x43, 0xE7, + 0xA5, 0xBF, 0x43, 0xE7, 0xA6, 0x81, 0x43, 0xE7, + 0xA6, 0x8D, 0x43, 0xE7, 0xA6, 0x8E, 0x43, 0xE7, + // Bytes 1080 - 10bf + 0xA6, 0x8F, 0x43, 0xE7, 0xA6, 0xAE, 0x43, 0xE7, + 0xA6, 0xB8, 0x43, 0xE7, 0xA6, 0xBE, 0x43, 0xE7, + 0xA7, 0x8A, 0x43, 0xE7, 0xA7, 0x98, 0x43, 0xE7, + 0xA7, 0xAB, 0x43, 0xE7, 0xA8, 0x9C, 0x43, 0xE7, + 0xA9, 0x80, 0x43, 0xE7, 0xA9, 0x8A, 0x43, 0xE7, + 0xA9, 0x8F, 0x43, 0xE7, 0xA9, 0xB4, 0x43, 0xE7, + 0xA9, 0xBA, 0x43, 0xE7, 0xAA, 0x81, 0x43, 0xE7, + 0xAA, 0xB1, 0x43, 0xE7, 0xAB, 0x8B, 0x43, 0xE7, + // Bytes 10c0 - 10ff + 0xAB, 0xAE, 0x43, 0xE7, 0xAB, 0xB9, 0x43, 0xE7, + 0xAC, 0xA0, 0x43, 0xE7, 0xAE, 0x8F, 0x43, 0xE7, + 0xAF, 0x80, 0x43, 0xE7, 0xAF, 0x86, 0x43, 0xE7, + 0xAF, 0x89, 0x43, 0xE7, 0xB0, 0xBE, 0x43, 0xE7, + 0xB1, 0xA0, 0x43, 0xE7, 0xB1, 0xB3, 0x43, 0xE7, + 0xB1, 0xBB, 0x43, 0xE7, 0xB2, 0x92, 0x43, 0xE7, + 0xB2, 0xBE, 0x43, 0xE7, 0xB3, 0x92, 0x43, 0xE7, + 0xB3, 0x96, 0x43, 0xE7, 0xB3, 0xA3, 0x43, 0xE7, + // Bytes 1100 - 113f + 0xB3, 0xA7, 0x43, 0xE7, 0xB3, 0xA8, 0x43, 0xE7, + 0xB3, 0xB8, 0x43, 0xE7, 0xB4, 0x80, 0x43, 0xE7, + 0xB4, 0x90, 0x43, 0xE7, 0xB4, 0xA2, 0x43, 0xE7, + 0xB4, 0xAF, 0x43, 0xE7, 0xB5, 0x82, 0x43, 0xE7, + 0xB5, 0x9B, 0x43, 0xE7, 0xB5, 0xA3, 0x43, 0xE7, + 0xB6, 0xA0, 0x43, 0xE7, 0xB6, 0xBE, 0x43, 0xE7, + 0xB7, 0x87, 0x43, 0xE7, 0xB7, 0xB4, 0x43, 0xE7, + 0xB8, 0x82, 0x43, 0xE7, 0xB8, 0x89, 0x43, 0xE7, + // Bytes 1140 - 117f + 0xB8, 0xB7, 0x43, 0xE7, 0xB9, 0x81, 0x43, 0xE7, + 0xB9, 0x85, 0x43, 0xE7, 0xBC, 0xB6, 0x43, 0xE7, + 0xBC, 0xBE, 0x43, 0xE7, 0xBD, 0x91, 0x43, 0xE7, + 0xBD, 0xB2, 0x43, 0xE7, 0xBD, 0xB9, 0x43, 0xE7, + 0xBD, 0xBA, 0x43, 0xE7, 0xBE, 0x85, 0x43, 0xE7, + 0xBE, 0x8A, 0x43, 0xE7, 0xBE, 0x95, 0x43, 0xE7, + 0xBE, 0x9A, 0x43, 0xE7, 0xBE, 0xBD, 0x43, 0xE7, + 0xBF, 0xBA, 0x43, 0xE8, 0x80, 0x81, 0x43, 0xE8, + // Bytes 1180 - 11bf + 0x80, 0x85, 0x43, 0xE8, 0x80, 0x8C, 0x43, 0xE8, + 0x80, 0x92, 0x43, 0xE8, 0x80, 0xB3, 0x43, 0xE8, + 0x81, 0x86, 0x43, 0xE8, 0x81, 0xA0, 0x43, 0xE8, + 0x81, 0xAF, 0x43, 0xE8, 0x81, 0xB0, 0x43, 0xE8, + 0x81, 0xBE, 0x43, 0xE8, 0x81, 0xBF, 0x43, 0xE8, + 
0x82, 0x89, 0x43, 0xE8, 0x82, 0x8B, 0x43, 0xE8, + 0x82, 0xAD, 0x43, 0xE8, 0x82, 0xB2, 0x43, 0xE8, + 0x84, 0x83, 0x43, 0xE8, 0x84, 0xBE, 0x43, 0xE8, + // Bytes 11c0 - 11ff + 0x87, 0x98, 0x43, 0xE8, 0x87, 0xA3, 0x43, 0xE8, + 0x87, 0xA8, 0x43, 0xE8, 0x87, 0xAA, 0x43, 0xE8, + 0x87, 0xAD, 0x43, 0xE8, 0x87, 0xB3, 0x43, 0xE8, + 0x87, 0xBC, 0x43, 0xE8, 0x88, 0x81, 0x43, 0xE8, + 0x88, 0x84, 0x43, 0xE8, 0x88, 0x8C, 0x43, 0xE8, + 0x88, 0x98, 0x43, 0xE8, 0x88, 0x9B, 0x43, 0xE8, + 0x88, 0x9F, 0x43, 0xE8, 0x89, 0xAE, 0x43, 0xE8, + 0x89, 0xAF, 0x43, 0xE8, 0x89, 0xB2, 0x43, 0xE8, + // Bytes 1200 - 123f + 0x89, 0xB8, 0x43, 0xE8, 0x89, 0xB9, 0x43, 0xE8, + 0x8A, 0x8B, 0x43, 0xE8, 0x8A, 0x91, 0x43, 0xE8, + 0x8A, 0x9D, 0x43, 0xE8, 0x8A, 0xB1, 0x43, 0xE8, + 0x8A, 0xB3, 0x43, 0xE8, 0x8A, 0xBD, 0x43, 0xE8, + 0x8B, 0xA5, 0x43, 0xE8, 0x8B, 0xA6, 0x43, 0xE8, + 0x8C, 0x9D, 0x43, 0xE8, 0x8C, 0xA3, 0x43, 0xE8, + 0x8C, 0xB6, 0x43, 0xE8, 0x8D, 0x92, 0x43, 0xE8, + 0x8D, 0x93, 0x43, 0xE8, 0x8D, 0xA3, 0x43, 0xE8, + // Bytes 1240 - 127f + 0x8E, 0xAD, 0x43, 0xE8, 0x8E, 0xBD, 0x43, 0xE8, + 0x8F, 0x89, 0x43, 0xE8, 0x8F, 0x8A, 0x43, 0xE8, + 0x8F, 0x8C, 0x43, 0xE8, 0x8F, 0x9C, 0x43, 0xE8, + 0x8F, 0xA7, 0x43, 0xE8, 0x8F, 0xAF, 0x43, 0xE8, + 0x8F, 0xB1, 0x43, 0xE8, 0x90, 0xBD, 0x43, 0xE8, + 0x91, 0x89, 0x43, 0xE8, 0x91, 0x97, 0x43, 0xE8, + 0x93, 0xAE, 0x43, 0xE8, 0x93, 0xB1, 0x43, 0xE8, + 0x93, 0xB3, 0x43, 0xE8, 0x93, 0xBC, 0x43, 0xE8, + // Bytes 1280 - 12bf + 0x94, 0x96, 0x43, 0xE8, 0x95, 0xA4, 0x43, 0xE8, + 0x97, 0x8D, 0x43, 0xE8, 0x97, 0xBA, 0x43, 0xE8, + 0x98, 0x86, 0x43, 0xE8, 0x98, 0x92, 0x43, 0xE8, + 0x98, 0xAD, 0x43, 0xE8, 0x98, 0xBF, 0x43, 0xE8, + 0x99, 0x8D, 0x43, 0xE8, 0x99, 0x90, 0x43, 0xE8, + 0x99, 0x9C, 0x43, 0xE8, 0x99, 0xA7, 0x43, 0xE8, + 0x99, 0xA9, 0x43, 0xE8, 0x99, 0xAB, 0x43, 0xE8, + 0x9A, 0x88, 0x43, 0xE8, 0x9A, 0xA9, 0x43, 0xE8, + // Bytes 12c0 - 12ff + 0x9B, 0xA2, 0x43, 0xE8, 0x9C, 0x8E, 0x43, 0xE8, + 0x9C, 0xA8, 0x43, 0xE8, 0x9D, 0xAB, 0x43, 0xE8, + 0x9D, 0xB9, 0x43, 0xE8, 0x9E, 0x86, 0x43, 0xE8, + 0x9E, 0xBA, 0x43, 0xE8, 0x9F, 0xA1, 0x43, 0xE8, + 0xA0, 0x81, 0x43, 0xE8, 0xA0, 0x9F, 0x43, 0xE8, + 0xA1, 0x80, 0x43, 0xE8, 0xA1, 0x8C, 0x43, 0xE8, + 0xA1, 0xA0, 0x43, 0xE8, 0xA1, 0xA3, 0x43, 0xE8, + 0xA3, 0x82, 0x43, 0xE8, 0xA3, 0x8F, 0x43, 0xE8, + // Bytes 1300 - 133f + 0xA3, 0x97, 0x43, 0xE8, 0xA3, 0x9E, 0x43, 0xE8, + 0xA3, 0xA1, 0x43, 0xE8, 0xA3, 0xB8, 0x43, 0xE8, + 0xA3, 0xBA, 0x43, 0xE8, 0xA4, 0x90, 0x43, 0xE8, + 0xA5, 0x81, 0x43, 0xE8, 0xA5, 0xA4, 0x43, 0xE8, + 0xA5, 0xBE, 0x43, 0xE8, 0xA6, 0x86, 0x43, 0xE8, + 0xA6, 0x8B, 0x43, 0xE8, 0xA6, 0x96, 0x43, 0xE8, + 0xA7, 0x92, 0x43, 0xE8, 0xA7, 0xA3, 0x43, 0xE8, + 0xA8, 0x80, 0x43, 0xE8, 0xAA, 0xA0, 0x43, 0xE8, + // Bytes 1340 - 137f + 0xAA, 0xAA, 0x43, 0xE8, 0xAA, 0xBF, 0x43, 0xE8, + 0xAB, 0x8B, 0x43, 0xE8, 0xAB, 0x92, 0x43, 0xE8, + 0xAB, 0x96, 0x43, 0xE8, 0xAB, 0xAD, 0x43, 0xE8, + 0xAB, 0xB8, 0x43, 0xE8, 0xAB, 0xBE, 0x43, 0xE8, + 0xAC, 0x81, 0x43, 0xE8, 0xAC, 0xB9, 0x43, 0xE8, + 0xAD, 0x98, 0x43, 0xE8, 0xAE, 0x80, 0x43, 0xE8, + 0xAE, 0x8A, 0x43, 0xE8, 0xB0, 0xB7, 0x43, 0xE8, + 0xB1, 0x86, 0x43, 0xE8, 0xB1, 0x88, 0x43, 0xE8, + // Bytes 1380 - 13bf + 0xB1, 0x95, 0x43, 0xE8, 0xB1, 0xB8, 0x43, 0xE8, + 0xB2, 0x9D, 0x43, 0xE8, 0xB2, 0xA1, 0x43, 0xE8, + 0xB2, 0xA9, 0x43, 0xE8, 0xB2, 0xAB, 0x43, 0xE8, + 0xB3, 0x81, 0x43, 0xE8, 0xB3, 0x82, 0x43, 0xE8, + 0xB3, 0x87, 0x43, 0xE8, 0xB3, 0x88, 0x43, 0xE8, + 0xB3, 0x93, 0x43, 0xE8, 0xB4, 0x88, 0x43, 0xE8, + 0xB4, 0x9B, 0x43, 0xE8, 0xB5, 0xA4, 0x43, 0xE8, + 0xB5, 0xB0, 0x43, 0xE8, 0xB5, 0xB7, 0x43, 0xE8, + // Bytes 13c0 - 13ff 
+ 0xB6, 0xB3, 0x43, 0xE8, 0xB6, 0xBC, 0x43, 0xE8, + 0xB7, 0x8B, 0x43, 0xE8, 0xB7, 0xAF, 0x43, 0xE8, + 0xB7, 0xB0, 0x43, 0xE8, 0xBA, 0xAB, 0x43, 0xE8, + 0xBB, 0x8A, 0x43, 0xE8, 0xBB, 0x94, 0x43, 0xE8, + 0xBC, 0xA6, 0x43, 0xE8, 0xBC, 0xAA, 0x43, 0xE8, + 0xBC, 0xB8, 0x43, 0xE8, 0xBC, 0xBB, 0x43, 0xE8, + 0xBD, 0xA2, 0x43, 0xE8, 0xBE, 0x9B, 0x43, 0xE8, + 0xBE, 0x9E, 0x43, 0xE8, 0xBE, 0xB0, 0x43, 0xE8, + // Bytes 1400 - 143f + 0xBE, 0xB5, 0x43, 0xE8, 0xBE, 0xB6, 0x43, 0xE9, + 0x80, 0xA3, 0x43, 0xE9, 0x80, 0xB8, 0x43, 0xE9, + 0x81, 0x8A, 0x43, 0xE9, 0x81, 0xA9, 0x43, 0xE9, + 0x81, 0xB2, 0x43, 0xE9, 0x81, 0xBC, 0x43, 0xE9, + 0x82, 0x8F, 0x43, 0xE9, 0x82, 0x91, 0x43, 0xE9, + 0x82, 0x94, 0x43, 0xE9, 0x83, 0x8E, 0x43, 0xE9, + 0x83, 0x9E, 0x43, 0xE9, 0x83, 0xB1, 0x43, 0xE9, + 0x83, 0xBD, 0x43, 0xE9, 0x84, 0x91, 0x43, 0xE9, + // Bytes 1440 - 147f + 0x84, 0x9B, 0x43, 0xE9, 0x85, 0x89, 0x43, 0xE9, + 0x85, 0x8D, 0x43, 0xE9, 0x85, 0xAA, 0x43, 0xE9, + 0x86, 0x99, 0x43, 0xE9, 0x86, 0xB4, 0x43, 0xE9, + 0x87, 0x86, 0x43, 0xE9, 0x87, 0x8C, 0x43, 0xE9, + 0x87, 0x8F, 0x43, 0xE9, 0x87, 0x91, 0x43, 0xE9, + 0x88, 0xB4, 0x43, 0xE9, 0x88, 0xB8, 0x43, 0xE9, + 0x89, 0xB6, 0x43, 0xE9, 0x89, 0xBC, 0x43, 0xE9, + 0x8B, 0x97, 0x43, 0xE9, 0x8B, 0x98, 0x43, 0xE9, + // Bytes 1480 - 14bf + 0x8C, 0x84, 0x43, 0xE9, 0x8D, 0x8A, 0x43, 0xE9, + 0x8F, 0xB9, 0x43, 0xE9, 0x90, 0x95, 0x43, 0xE9, + 0x95, 0xB7, 0x43, 0xE9, 0x96, 0x80, 0x43, 0xE9, + 0x96, 0x8B, 0x43, 0xE9, 0x96, 0xAD, 0x43, 0xE9, + 0x96, 0xB7, 0x43, 0xE9, 0x98, 0x9C, 0x43, 0xE9, + 0x98, 0xAE, 0x43, 0xE9, 0x99, 0x8B, 0x43, 0xE9, + 0x99, 0x8D, 0x43, 0xE9, 0x99, 0xB5, 0x43, 0xE9, + 0x99, 0xB8, 0x43, 0xE9, 0x99, 0xBC, 0x43, 0xE9, + // Bytes 14c0 - 14ff + 0x9A, 0x86, 0x43, 0xE9, 0x9A, 0xA3, 0x43, 0xE9, + 0x9A, 0xB6, 0x43, 0xE9, 0x9A, 0xB7, 0x43, 0xE9, + 0x9A, 0xB8, 0x43, 0xE9, 0x9A, 0xB9, 0x43, 0xE9, + 0x9B, 0x83, 0x43, 0xE9, 0x9B, 0xA2, 0x43, 0xE9, + 0x9B, 0xA3, 0x43, 0xE9, 0x9B, 0xA8, 0x43, 0xE9, + 0x9B, 0xB6, 0x43, 0xE9, 0x9B, 0xB7, 0x43, 0xE9, + 0x9C, 0xA3, 0x43, 0xE9, 0x9C, 0xB2, 0x43, 0xE9, + 0x9D, 0x88, 0x43, 0xE9, 0x9D, 0x91, 0x43, 0xE9, + // Bytes 1500 - 153f + 0x9D, 0x96, 0x43, 0xE9, 0x9D, 0x9E, 0x43, 0xE9, + 0x9D, 0xA2, 0x43, 0xE9, 0x9D, 0xA9, 0x43, 0xE9, + 0x9F, 0x8B, 0x43, 0xE9, 0x9F, 0x9B, 0x43, 0xE9, + 0x9F, 0xA0, 0x43, 0xE9, 0x9F, 0xAD, 0x43, 0xE9, + 0x9F, 0xB3, 0x43, 0xE9, 0x9F, 0xBF, 0x43, 0xE9, + 0xA0, 0x81, 0x43, 0xE9, 0xA0, 0x85, 0x43, 0xE9, + 0xA0, 0x8B, 0x43, 0xE9, 0xA0, 0x98, 0x43, 0xE9, + 0xA0, 0xA9, 0x43, 0xE9, 0xA0, 0xBB, 0x43, 0xE9, + // Bytes 1540 - 157f + 0xA1, 0x9E, 0x43, 0xE9, 0xA2, 0xA8, 0x43, 0xE9, + 0xA3, 0x9B, 0x43, 0xE9, 0xA3, 0x9F, 0x43, 0xE9, + 0xA3, 0xA2, 0x43, 0xE9, 0xA3, 0xAF, 0x43, 0xE9, + 0xA3, 0xBC, 0x43, 0xE9, 0xA4, 0xA8, 0x43, 0xE9, + 0xA4, 0xA9, 0x43, 0xE9, 0xA6, 0x96, 0x43, 0xE9, + 0xA6, 0x99, 0x43, 0xE9, 0xA6, 0xA7, 0x43, 0xE9, + 0xA6, 0xAC, 0x43, 0xE9, 0xA7, 0x82, 0x43, 0xE9, + 0xA7, 0xB1, 0x43, 0xE9, 0xA7, 0xBE, 0x43, 0xE9, + // Bytes 1580 - 15bf + 0xA9, 0xAA, 0x43, 0xE9, 0xAA, 0xA8, 0x43, 0xE9, + 0xAB, 0x98, 0x43, 0xE9, 0xAB, 0x9F, 0x43, 0xE9, + 0xAC, 0x92, 0x43, 0xE9, 0xAC, 0xA5, 0x43, 0xE9, + 0xAC, 0xAF, 0x43, 0xE9, 0xAC, 0xB2, 0x43, 0xE9, + 0xAC, 0xBC, 0x43, 0xE9, 0xAD, 0x9A, 0x43, 0xE9, + 0xAD, 0xAF, 0x43, 0xE9, 0xB1, 0x80, 0x43, 0xE9, + 0xB1, 0x97, 0x43, 0xE9, 0xB3, 0xA5, 0x43, 0xE9, + 0xB3, 0xBD, 0x43, 0xE9, 0xB5, 0xA7, 0x43, 0xE9, + // Bytes 15c0 - 15ff + 0xB6, 0xB4, 0x43, 0xE9, 0xB7, 0xBA, 0x43, 0xE9, + 0xB8, 0x9E, 0x43, 0xE9, 0xB9, 0xB5, 0x43, 0xE9, + 0xB9, 0xBF, 0x43, 0xE9, 0xBA, 0x97, 0x43, 0xE9, + 0xBA, 0x9F, 0x43, 
0xE9, 0xBA, 0xA5, 0x43, 0xE9, + 0xBA, 0xBB, 0x43, 0xE9, 0xBB, 0x83, 0x43, 0xE9, + 0xBB, 0x8D, 0x43, 0xE9, 0xBB, 0x8E, 0x43, 0xE9, + 0xBB, 0x91, 0x43, 0xE9, 0xBB, 0xB9, 0x43, 0xE9, + 0xBB, 0xBD, 0x43, 0xE9, 0xBB, 0xBE, 0x43, 0xE9, + // Bytes 1600 - 163f + 0xBC, 0x85, 0x43, 0xE9, 0xBC, 0x8E, 0x43, 0xE9, + 0xBC, 0x8F, 0x43, 0xE9, 0xBC, 0x93, 0x43, 0xE9, + 0xBC, 0x96, 0x43, 0xE9, 0xBC, 0xA0, 0x43, 0xE9, + 0xBC, 0xBB, 0x43, 0xE9, 0xBD, 0x83, 0x43, 0xE9, + 0xBD, 0x8A, 0x43, 0xE9, 0xBD, 0x92, 0x43, 0xE9, + 0xBE, 0x8D, 0x43, 0xE9, 0xBE, 0x8E, 0x43, 0xE9, + 0xBE, 0x9C, 0x43, 0xE9, 0xBE, 0x9F, 0x43, 0xE9, + 0xBE, 0xA0, 0x43, 0xEA, 0x9C, 0xA7, 0x43, 0xEA, + // Bytes 1640 - 167f + 0x9D, 0xAF, 0x43, 0xEA, 0xAC, 0xB7, 0x43, 0xEA, + 0xAD, 0x92, 0x44, 0xF0, 0xA0, 0x84, 0xA2, 0x44, + 0xF0, 0xA0, 0x94, 0x9C, 0x44, 0xF0, 0xA0, 0x94, + 0xA5, 0x44, 0xF0, 0xA0, 0x95, 0x8B, 0x44, 0xF0, + 0xA0, 0x98, 0xBA, 0x44, 0xF0, 0xA0, 0xA0, 0x84, + 0x44, 0xF0, 0xA0, 0xA3, 0x9E, 0x44, 0xF0, 0xA0, + 0xA8, 0xAC, 0x44, 0xF0, 0xA0, 0xAD, 0xA3, 0x44, + 0xF0, 0xA1, 0x93, 0xA4, 0x44, 0xF0, 0xA1, 0x9A, + // Bytes 1680 - 16bf + 0xA8, 0x44, 0xF0, 0xA1, 0x9B, 0xAA, 0x44, 0xF0, + 0xA1, 0xA7, 0x88, 0x44, 0xF0, 0xA1, 0xAC, 0x98, + 0x44, 0xF0, 0xA1, 0xB4, 0x8B, 0x44, 0xF0, 0xA1, + 0xB7, 0xA4, 0x44, 0xF0, 0xA1, 0xB7, 0xA6, 0x44, + 0xF0, 0xA2, 0x86, 0x83, 0x44, 0xF0, 0xA2, 0x86, + 0x9F, 0x44, 0xF0, 0xA2, 0x8C, 0xB1, 0x44, 0xF0, + 0xA2, 0x9B, 0x94, 0x44, 0xF0, 0xA2, 0xA1, 0x84, + 0x44, 0xF0, 0xA2, 0xA1, 0x8A, 0x44, 0xF0, 0xA2, + // Bytes 16c0 - 16ff + 0xAC, 0x8C, 0x44, 0xF0, 0xA2, 0xAF, 0xB1, 0x44, + 0xF0, 0xA3, 0x80, 0x8A, 0x44, 0xF0, 0xA3, 0x8A, + 0xB8, 0x44, 0xF0, 0xA3, 0x8D, 0x9F, 0x44, 0xF0, + 0xA3, 0x8E, 0x93, 0x44, 0xF0, 0xA3, 0x8E, 0x9C, + 0x44, 0xF0, 0xA3, 0x8F, 0x83, 0x44, 0xF0, 0xA3, + 0x8F, 0x95, 0x44, 0xF0, 0xA3, 0x91, 0xAD, 0x44, + 0xF0, 0xA3, 0x9A, 0xA3, 0x44, 0xF0, 0xA3, 0xA2, + 0xA7, 0x44, 0xF0, 0xA3, 0xAA, 0x8D, 0x44, 0xF0, + // Bytes 1700 - 173f + 0xA3, 0xAB, 0xBA, 0x44, 0xF0, 0xA3, 0xB2, 0xBC, + 0x44, 0xF0, 0xA3, 0xB4, 0x9E, 0x44, 0xF0, 0xA3, + 0xBB, 0x91, 0x44, 0xF0, 0xA3, 0xBD, 0x9E, 0x44, + 0xF0, 0xA3, 0xBE, 0x8E, 0x44, 0xF0, 0xA4, 0x89, + 0xA3, 0x44, 0xF0, 0xA4, 0x8B, 0xAE, 0x44, 0xF0, + 0xA4, 0x8E, 0xAB, 0x44, 0xF0, 0xA4, 0x98, 0x88, + 0x44, 0xF0, 0xA4, 0x9C, 0xB5, 0x44, 0xF0, 0xA4, + 0xA0, 0x94, 0x44, 0xF0, 0xA4, 0xB0, 0xB6, 0x44, + // Bytes 1740 - 177f + 0xF0, 0xA4, 0xB2, 0x92, 0x44, 0xF0, 0xA4, 0xBE, + 0xA1, 0x44, 0xF0, 0xA4, 0xBE, 0xB8, 0x44, 0xF0, + 0xA5, 0x81, 0x84, 0x44, 0xF0, 0xA5, 0x83, 0xB2, + 0x44, 0xF0, 0xA5, 0x83, 0xB3, 0x44, 0xF0, 0xA5, + 0x84, 0x99, 0x44, 0xF0, 0xA5, 0x84, 0xB3, 0x44, + 0xF0, 0xA5, 0x89, 0x89, 0x44, 0xF0, 0xA5, 0x90, + 0x9D, 0x44, 0xF0, 0xA5, 0x98, 0xA6, 0x44, 0xF0, + 0xA5, 0x9A, 0x9A, 0x44, 0xF0, 0xA5, 0x9B, 0x85, + // Bytes 1780 - 17bf + 0x44, 0xF0, 0xA5, 0xA5, 0xBC, 0x44, 0xF0, 0xA5, + 0xAA, 0xA7, 0x44, 0xF0, 0xA5, 0xAE, 0xAB, 0x44, + 0xF0, 0xA5, 0xB2, 0x80, 0x44, 0xF0, 0xA5, 0xB3, + 0x90, 0x44, 0xF0, 0xA5, 0xBE, 0x86, 0x44, 0xF0, + 0xA6, 0x87, 0x9A, 0x44, 0xF0, 0xA6, 0x88, 0xA8, + 0x44, 0xF0, 0xA6, 0x89, 0x87, 0x44, 0xF0, 0xA6, + 0x8B, 0x99, 0x44, 0xF0, 0xA6, 0x8C, 0xBE, 0x44, + 0xF0, 0xA6, 0x93, 0x9A, 0x44, 0xF0, 0xA6, 0x94, + // Bytes 17c0 - 17ff + 0xA3, 0x44, 0xF0, 0xA6, 0x96, 0xA8, 0x44, 0xF0, + 0xA6, 0x9E, 0xA7, 0x44, 0xF0, 0xA6, 0x9E, 0xB5, + 0x44, 0xF0, 0xA6, 0xAC, 0xBC, 0x44, 0xF0, 0xA6, + 0xB0, 0xB6, 0x44, 0xF0, 0xA6, 0xB3, 0x95, 0x44, + 0xF0, 0xA6, 0xB5, 0xAB, 0x44, 0xF0, 0xA6, 0xBC, + 0xAC, 0x44, 0xF0, 0xA6, 0xBE, 0xB1, 0x44, 0xF0, + 0xA7, 0x83, 0x92, 0x44, 0xF0, 0xA7, 
0x8F, 0x8A, + 0x44, 0xF0, 0xA7, 0x99, 0xA7, 0x44, 0xF0, 0xA7, + // Bytes 1800 - 183f + 0xA2, 0xAE, 0x44, 0xF0, 0xA7, 0xA5, 0xA6, 0x44, + 0xF0, 0xA7, 0xB2, 0xA8, 0x44, 0xF0, 0xA7, 0xBB, + 0x93, 0x44, 0xF0, 0xA7, 0xBC, 0xAF, 0x44, 0xF0, + 0xA8, 0x97, 0x92, 0x44, 0xF0, 0xA8, 0x97, 0xAD, + 0x44, 0xF0, 0xA8, 0x9C, 0xAE, 0x44, 0xF0, 0xA8, + 0xAF, 0xBA, 0x44, 0xF0, 0xA8, 0xB5, 0xB7, 0x44, + 0xF0, 0xA9, 0x85, 0x85, 0x44, 0xF0, 0xA9, 0x87, + 0x9F, 0x44, 0xF0, 0xA9, 0x88, 0x9A, 0x44, 0xF0, + // Bytes 1840 - 187f + 0xA9, 0x90, 0x8A, 0x44, 0xF0, 0xA9, 0x92, 0x96, + 0x44, 0xF0, 0xA9, 0x96, 0xB6, 0x44, 0xF0, 0xA9, + 0xAC, 0xB0, 0x44, 0xF0, 0xAA, 0x83, 0x8E, 0x44, + 0xF0, 0xAA, 0x84, 0x85, 0x44, 0xF0, 0xAA, 0x88, + 0x8E, 0x44, 0xF0, 0xAA, 0x8A, 0x91, 0x44, 0xF0, + 0xAA, 0x8E, 0x92, 0x44, 0xF0, 0xAA, 0x98, 0x80, + 0x42, 0x21, 0x21, 0x42, 0x21, 0x3F, 0x42, 0x2E, + 0x2E, 0x42, 0x30, 0x2C, 0x42, 0x30, 0x2E, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2C, 0x42, 0x31, 0x2E, 0x42, 0x31, 0x30, + 0x42, 0x31, 0x31, 0x42, 0x31, 0x32, 0x42, 0x31, + 0x33, 0x42, 0x31, 0x34, 0x42, 0x31, 0x35, 0x42, + 0x31, 0x36, 0x42, 0x31, 0x37, 0x42, 0x31, 0x38, + 0x42, 0x31, 0x39, 0x42, 0x32, 0x2C, 0x42, 0x32, + 0x2E, 0x42, 0x32, 0x30, 0x42, 0x32, 0x31, 0x42, + 0x32, 0x32, 0x42, 0x32, 0x33, 0x42, 0x32, 0x34, + 0x42, 0x32, 0x35, 0x42, 0x32, 0x36, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x37, 0x42, 0x32, 0x38, 0x42, 0x32, 0x39, 0x42, + 0x33, 0x2C, 0x42, 0x33, 0x2E, 0x42, 0x33, 0x30, + 0x42, 0x33, 0x31, 0x42, 0x33, 0x32, 0x42, 0x33, + 0x33, 0x42, 0x33, 0x34, 0x42, 0x33, 0x35, 0x42, + 0x33, 0x36, 0x42, 0x33, 0x37, 0x42, 0x33, 0x38, + 0x42, 0x33, 0x39, 0x42, 0x34, 0x2C, 0x42, 0x34, + 0x2E, 0x42, 0x34, 0x30, 0x42, 0x34, 0x31, 0x42, + 0x34, 0x32, 0x42, 0x34, 0x33, 0x42, 0x34, 0x34, + // Bytes 1900 - 193f + 0x42, 0x34, 0x35, 0x42, 0x34, 0x36, 0x42, 0x34, + 0x37, 0x42, 0x34, 0x38, 0x42, 0x34, 0x39, 0x42, + 0x35, 0x2C, 0x42, 0x35, 0x2E, 0x42, 0x35, 0x30, + 0x42, 0x36, 0x2C, 0x42, 0x36, 0x2E, 0x42, 0x37, + 0x2C, 0x42, 0x37, 0x2E, 0x42, 0x38, 0x2C, 0x42, + 0x38, 0x2E, 0x42, 0x39, 0x2C, 0x42, 0x39, 0x2E, + 0x42, 0x3D, 0x3D, 0x42, 0x3F, 0x21, 0x42, 0x3F, + 0x3F, 0x42, 0x41, 0x55, 0x42, 0x42, 0x71, 0x42, + // Bytes 1940 - 197f + 0x43, 0x44, 0x42, 0x44, 0x4A, 0x42, 0x44, 0x5A, + 0x42, 0x44, 0x7A, 0x42, 0x47, 0x42, 0x42, 0x47, + 0x79, 0x42, 0x48, 0x50, 0x42, 0x48, 0x56, 0x42, + 0x48, 0x67, 0x42, 0x48, 0x7A, 0x42, 0x49, 0x49, + 0x42, 0x49, 0x4A, 0x42, 0x49, 0x55, 0x42, 0x49, + 0x56, 0x42, 0x49, 0x58, 0x42, 0x4B, 0x42, 0x42, + 0x4B, 0x4B, 0x42, 0x4B, 0x4D, 0x42, 0x4C, 0x4A, + 0x42, 0x4C, 0x6A, 0x42, 0x4D, 0x42, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x43, 0x42, 0x4D, 0x44, 0x42, 0x4D, 0x52, 0x42, + 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, 0x4E, 0x4A, + 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, 0x42, 0x50, + 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, 0x61, 0x42, + 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, 0x53, 0x4D, + 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, 0x42, 0x54, + 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, 0x43, 0x42, + 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, 0x58, 0x49, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, 0x42, 0x63, + 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, 0x61, 0x42, + 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, 0x64, 0x7A, + 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, 0x42, 0x66, + 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, 0x6D, 0x42, + 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, 0x69, 0x6A, + 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, 0x42, 0x69, + 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, 0x56, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, 0x6B, 0x6C, + 0x42, 0x6B, 0x6D, 0x42, 0x6B, 
0x74, 0x42, 0x6C, + 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, 0x6E, 0x42, + 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, 0x6D, 0x33, + 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, 0x42, 0x6D, + 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, 0x67, 0x42, + 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, 0x6D, 0x73, + 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, 0x6A, 0x42, + 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, 0x6F, 0x56, + 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, 0x42, 0x70, + 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, 0x63, 0x42, + 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, 0x73, 0x74, + 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, 0x43, 0x28, + 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, 0x43, 0x28, + 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, 0x43, 0x28, + // Bytes 1a80 - 1abf + 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, 0x43, 0x28, + 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, 0x43, 0x28, + 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, 0x43, 0x28, + 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, 0x43, 0x28, + 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, 0x43, 0x28, + 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, 0x43, 0x28, + 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, 0x43, 0x28, + 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, 0x43, 0x28, + // Bytes 1ac0 - 1aff + 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, 0x43, 0x28, + 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, 0x43, 0x28, + 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, 0x43, 0x28, + 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, 0x43, 0x28, + 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, 0x43, 0x28, + 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, 0x43, 0x28, + 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, 0x43, 0x28, + 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, 0x43, 0x28, + // Bytes 1b00 - 1b3f + 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, 0x43, 0x28, + 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, 0x43, 0x28, + 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, 0x43, 0x28, + 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, 0x43, 0x28, + 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, 0x43, 0x28, + 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, 0x43, 0x28, + 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, 0x43, 0x28, + 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, 0x43, 0x28, + // Bytes 1b40 - 1b7f + 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, 0x43, 0x28, + 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, 0x43, 0x28, + 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, 0x43, 0x28, + 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, 0x43, 0x28, + 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, 0x43, 0x31, + 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, 0x43, 0x31, + 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, 0x43, 0x31, + 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, 0x43, 0x31, + // Bytes 1b80 - 1bbf + 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, 0x43, 0x31, + 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, 0x43, 0x32, + 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, 0x43, 0x3D, + 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, 0x43, 0x46, + 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, 0x43, 0x47, + 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, 0x43, 0x4C, + 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, 0x43, 0x4D, + 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, 0x43, 0x4D, + // Bytes 1bc0 - 1bff + 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, 0x43, 0x50, + 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, 0x43, 0x54, + 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, 0x43, 0x56, + 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, 0x43, 0x61, + 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, 0x43, 0x61, + 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, 0x43, 0x63, + 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, 0x43, 0x63, + 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, 0x43, 0x63, + // Bytes 1c00 - 1c3f + 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, 0x43, 0x64, + 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, 0x43, 0x66, + 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, 0x43, 0x67, + 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, 0x43, 0x69, + 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, 0x43, 0x6B, + 
0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, 0x43, 0x6B, + 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, 0x43, 0x6C, + 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, 0x43, 0x6D, + // Bytes 1c40 - 1c7f + 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, 0x43, 0x6D, + 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, 0x43, 0x72, + 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, 0x43, 0x78, + 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, 0x43, 0xC2, + 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, 0x43, 0xCE, + 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, 0x43, 0xCE, + 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, 0x43, 0xCE, + 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, 0x43, 0xCE, + // Bytes 1c80 - 1cbf + 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, 0x44, 0x28, + 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, 0x31, 0x29, + 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, 0x28, 0x31, + 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, 0x29, 0x44, + 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, 0x31, 0x36, + 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, 0x44, 0x28, + 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, 0x39, 0x29, + 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, 0x30, 0xE7, + // Bytes 1cc0 - 1cff + 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, 0x84, 0x44, + 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, 0xE6, 0x9C, + 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, 0x44, 0x32, + 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, 0x9C, 0x88, + 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, 0x33, 0xE6, + 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, 0x88, 0x44, + 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, 0xE6, 0x97, + 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, 0x44, 0x34, + // Bytes 1d00 - 1d3f + 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, 0x97, 0xA5, + 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, 0x35, 0xE7, + 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, 0xA5, 0x44, + 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, 0xE7, 0x82, + 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, 0x44, 0x37, + 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, 0x82, 0xB9, + 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, 0x38, 0xE6, + 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, 0xB9, 0x44, + // Bytes 1d40 - 1d7f + 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, 0xE6, 0x9C, + 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, 0x44, 0x56, + 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, 0x6D, 0x2E, + 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, 0x70, 0x2E, + 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, 0x69, 0x44, + 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, 0xB4, 0xD5, + 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, 0x44, 0xD5, + 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, 0xD5, 0xB6, + // Bytes 1d80 - 1dbf + 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, 0xD7, 0x90, + 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, 0xB4, 0x44, + 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, 0xA8, 0xD8, + 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, 0x44, 0xD8, + 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, 0xD8, 0xB2, + 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, 0xD8, 0xA8, + 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, 0x87, 0x44, + 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, 0xA8, 0xD9, + // Bytes 1dc0 - 1dff + 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, 0x44, 0xD8, + 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, 0xD8, 0xAE, + 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, 0xD8, 0xAA, + 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, 0x85, 0x44, + 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, 0xAA, 0xD9, + 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, 0x44, 0xD8, + 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, 0xD8, 0xAC, + 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, 0xD8, 0xAB, + // Bytes 1e00 - 1e3f + 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, 0x85, 0x44, + 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, 0xAB, 0xD9, + 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, 0x44, 0xD8, + 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, 0xD8, 0xAD, + 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, 0xD8, 0xAC, + 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, 0x8A, 0x44, + 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, 0xAD, 0xD9, + 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, 0x44, 0xD8, + // Bytes 1e40 - 1e7f 
+ 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, 0xD8, 0xAC, + 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, 0xD8, 0xAE, + 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, 0x89, 0x44, + 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, 0xB3, 0xD8, + 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, 0x44, 0xD8, + 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, 0xD8, 0xB1, + 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, 0xD8, 0xB3, + 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, 0x89, 0x44, + // Bytes 1e80 - 1ebf + 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, 0xB4, 0xD8, + 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, 0x44, 0xD8, + 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, 0xD8, 0xB1, + 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, 0xD8, 0xB4, + 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, 0x89, 0x44, + 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, 0xB5, 0xD8, + 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, 0x44, 0xD8, + 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, 0xD9, 0x85, + // Bytes 1ec0 - 1eff + 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, 0xD8, 0xB5, + 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, 0xAC, 0x44, + 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, 0xB6, 0xD8, + 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, 0x44, 0xD8, + 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, 0xD9, 0x89, + 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, 0xD8, 0xB7, + 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, 0x85, 0x44, + 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, 0xB7, 0xD9, + // Bytes 1f00 - 1f3f + 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, 0x44, 0xD8, + 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, 0xD9, 0x85, + 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, 0xD8, 0xB9, + 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, 0xAC, 0x44, + 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, 0xBA, 0xD9, + 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, 0x44, 0xD9, + 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, 0xD8, 0xAD, + 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, 0xD9, 0x81, + // Bytes 1f40 - 1f7f + 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, 0x89, 0x44, + 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, 0x82, 0xD8, + 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, 0x44, 0xD9, + 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, 0xD9, 0x8A, + 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, 0xD9, 0x83, + 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, 0xAD, 0x44, + 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, 0x83, 0xD9, + 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, 0x44, 0xD9, + // Bytes 1f80 - 1fbf + 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, 0xD9, 0x8A, + 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, 0xD9, 0x84, + 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, 0xAD, 0x44, + 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, 0x84, 0xD9, + 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, 0x44, 0xD9, + 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, 0xD9, 0x8A, + 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, 0xD9, 0x85, + 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, 0xAD, 0x44, + // Bytes 1fc0 - 1fff + 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, 0x85, 0xD9, + 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, 0x44, 0xD9, + 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, 0xD8, 0xAC, + 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, 0xD9, 0x86, + 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, 0xB1, 0x44, + 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, 0x86, 0xD9, + 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, 0x44, 0xD9, + 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, 0xD9, 0x89, + // Bytes 2000 - 203f + 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, 0xD9, 0x87, + 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, 0x85, 0x44, + 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, 0x87, 0xD9, + 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, 0x44, 0xD9, + 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, 0xD8, 0xAD, + 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, 0xD9, 0x8A, + 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, 0xB2, 0x44, + 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, 0x8A, 0xD9, + // Bytes 2040 - 207f + 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, 0x44, 0xD9, + 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, 0xD9, 0x8A, + 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, 0xDB, 0x87, + 0xD9, 0xB4, 0x45, 
0x28, 0xE1, 0x84, 0x80, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x86, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8C, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x91, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x89, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, 0xE4, 0xBA, + 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, 0xA3, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, 0x45, 0x28, + 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, 0xAD, 0x29, + 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, 0xE5, 0x8D, + // Bytes 2100 - 213f + 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, 0x8D, 0x29, + 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, 0x45, 0x28, + 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, 0xE5, 0x9C, + 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, 0xA6, 0x29, + 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, 0xA8, 0x29, + 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, 0xE7, 0x81, + 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, 0xB9, 0x29, + 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, 0x45, 0x28, + 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, 0xAD, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, 0x45, 0x28, + 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, 0xE8, 0xB2, + 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, 0x87, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, 0x45, 0x30, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0x30, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, 0x9C, 0x88, + 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x31, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x32, 0xE7, + // Bytes 21c0 - 21ff + 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x34, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x36, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x38, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x38, + // Bytes 2240 - 227f + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, 0x45, 0x32, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x30, 0xE7, + 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, 0x45, 0x32, + 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x32, 0xE7, + 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, 
0x45, 0x32, + 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x34, 0xE7, + // Bytes 2280 - 22bf + 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x38, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x32, + 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, 0x30, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, 0x97, 0xA5, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x35, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, 0x45, 0x6D, + 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, 0xE2, 0x81, + 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, 0x88, 0x95, + // Bytes 2300 - 233f + 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, 0x95, 0x73, + 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD8, + 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD8, + 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + // Bytes 2340 - 237f + 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x89, + 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, 0xAA, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, 0x8A, + // Bytes 2380 - 23bf + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAD, + 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xAD, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, 0xD8, + 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, 0xD9, 0x89, + 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + // Bytes 23c0 - 23ff + 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, 0x46, 0xD8, + 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, 0xB3, 0xD9, + 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, 0x85, + 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + // Bytes 2400 - 243f + 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB5, + 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, 0xB5, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, 0xD9, 0x84, + 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, 0x84, 0xDB, + 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x89, 0x46, + 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, + 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + // Bytes 2440 - 247f + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB7, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x89, 0x46, + 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xBA, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xBA, 0xD9, + // Bytes 2480 - 24bf + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, 0xD8, 0xAE, + 0xD9, 0x85, 0x46, 0xD9, 0x81, 
0xD9, 0x85, 0xD9, + 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, 0xDB, 0x92, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x83, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x83, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + // Bytes 24c0 - 24ff + 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD9, + 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD9, + 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD8, + // Bytes 2500 - 253f + 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD8, 0xAE, + 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD9, 0x85, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD9, 0x8A, + // Bytes 2540 - 257f + 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x86, + 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x89, 0x46, + // Bytes 2580 - 25bf + 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, + 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD9, 0x87, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xA7, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, 0x46, 0xD9, + // Bytes 25c0 - 25ff + 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x86, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x87, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, 0xD9, 0x8A, + // Bytes 2600 - 263f + 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x90, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x95, 0x46, + 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, 0x46, 0xE0, + 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, 0xE0, 0xBA, + 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, 0xBB, 0x8D, + // Bytes 2640 - 267f + 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, 0x80, 0xE0, + 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBE, 0x92, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0x9C, 0xE0, + // Bytes 2680 - 26bf + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, 0xB7, 0x46, + 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x46, 0xE2, + 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, 0xE2, 0x88, + 
0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, 0xBB, 0xE3, + 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, 0xE3, 0x82, + // Bytes 26c0 - 26ff + 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0xB3, 0x46, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, 0x46, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, 0x83, 0x9B, + 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, 0x9F, 0xE3, + 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, 0xE3, 0x83, + 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xA0, + // Bytes 2700 - 273f + 0x46, 0xE4, 0xBB, 0xA4, 0xE5, 0x92, 0x8C, 0x46, + 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, 0xA3, 0x46, 0xE5, + 0xB9, 0xB3, 0xE6, 0x88, 0x90, 0x46, 0xE6, 0x98, + 0x8E, 0xE6, 0xB2, 0xBB, 0x46, 0xE6, 0x98, 0xAD, + 0xE5, 0x92, 0x8C, 0x47, 0x72, 0x61, 0x64, 0xE2, + 0x88, 0x95, 0x73, 0x47, 0xE3, 0x80, 0x94, 0x53, + 0xE3, 0x80, 0x95, 0x48, 0x28, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + // Bytes 2740 - 277f + 0x82, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x89, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, + // Bytes 2780 - 27bf + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8C, 0xE1, 0x85, 0xAE, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x92, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x72, 0x61, 0x64, 0xE2, 0x88, + // Bytes 27c0 - 27ff + 0x95, 0x73, 0x32, 0x48, 0xD8, 0xA7, 0xD9, 0x83, + 0xD8, 0xA8, 0xD8, 0xB1, 0x48, 0xD8, 0xA7, 0xD9, + 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x48, 0xD8, 0xB1, + 0xD8, 0xB3, 0xD9, 0x88, 0xD9, 0x84, 0x48, 0xD8, + 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, 0xD9, 0x84, 0x48, + 0xD8, 0xB5, 0xD9, 0x84, 0xD8, 0xB9, 0xD9, 0x85, + 0x48, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, + 0x87, 0x48, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, + // Bytes 2800 - 283f + 0xD8, 0xAF, 0x48, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, + 0x84, 0xD9, 0x85, 0x49, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x49, 0xE2, 0x80, + 0xB5, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x49, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0x49, 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0x49, 0xE3, 0x80, 0x94, 0xE4, + 0xB8, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + // Bytes 2840 - 287f + 0x94, 0xE4, 0xBA, 0x8C, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE5, 0x8B, 0x9D, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0xAE, 0x89, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, + 0x89, 0x93, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x95, 0x97, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE6, 0x9C, 0xAC, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, 0x82, 0xB9, + // Bytes 2880 - 28bf + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, + 0x9B, 0x97, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x82, + 0xA2, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0x49, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0xA9, + 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x82, 0xAA, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, + 0xAA, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x49, + // Bytes 28c0 - 28ff 
+ 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xAA, 0x49, 0xE3, 0x82, 0xB1, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, 0xB3, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x8A, 0x49, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, + 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x88, 0x49, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xB7, 0x49, 0xE3, 0x83, 0x88, 0xE3, + // Bytes 2900 - 293f + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x8E, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x49, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0x84, 0x49, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xB3, 0x49, 0xE3, 0x83, + 0x95, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + // Bytes 2940 - 297f + 0xBD, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x9B, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x8F, 0x49, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0xAB, 0xE3, 0x82, 0xAF, 0x49, 0xE3, 0x83, + // Bytes 2980 - 29bf + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0xA6, 0xE3, 0x82, 0xA2, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0x4C, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x4C, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x4C, 0xE3, 0x82, + 0xA2, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x95, 0xE3, + // Bytes 29c0 - 29ff + 0x82, 0xA1, 0x4C, 0xE3, 0x82, 0xA8, 0xE3, 0x83, + 0xBC, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xBC, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x9E, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x82, 0xAB, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAA, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xA5, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xA0, 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x8D, 0x4C, + 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + // Bytes 2a40 - 2a7f + 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x82, 0xBF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xB9, 0x4C, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x84, 0x4C, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0xA3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0xBC, 0xE3, 0x82, 0xBF, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0x8B, 0xE3, + 0x83, 0x92, 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x4C, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x9E, + 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0x4C, 0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xAF, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, + 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAA, 0xE3, + 0x83, 0x83, 0xE3, 
0x83, 0x88, 0xE3, 0x83, 0xAB, + 0x4C, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0x4C, 0xE6, 0xA0, + 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, 0xBC, 0x9A, 0xE7, + 0xA4, 0xBE, 0x4E, 0x28, 0xE1, 0x84, 0x8B, 0xE1, + // Bytes 2b00 - 2b3f + 0x85, 0xA9, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xAE, + 0x29, 0x4F, 0xD8, 0xAC, 0xD9, 0x84, 0x20, 0xD8, + 0xAC, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + 0x87, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA2, 0x4F, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x82, 0xB5, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x81, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xA0, 0x4F, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, + 0xAB, 0x4F, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0xBF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, + // Bytes 2b80 - 2bbf + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xB3, + 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA7, 0xE3, 0x83, + 0xB3, 0x4F, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x88, 0xE3, 0x83, + 0xB3, 0x4F, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0x51, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, + // Bytes 2bc0 - 2bff + 0xA9, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA5, 0xE1, + 0x86, 0xAB, 0x29, 0x52, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0x52, 0xE3, + 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xA0, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xA0, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0xE3, 0x82, + 0xBB, 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xA4, 0xE3, + 0x83, 0xAD, 0x52, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBB, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x52, 0xE3, 0x83, + // Bytes 2c40 - 2c7f + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0xE3, + 0x82, 0xB9, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, + 0x52, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0x83, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0xA7, + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x9F, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0xAC, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + // Bytes 2c80 - 2cbf + 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0x61, 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, + 0x20, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, + 0x87, 0x20, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, + 0xD9, 0x87, 0x20, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, + 0x84, 0xD9, 0x85, 0x06, 0xE0, 0xA7, 0x87, 0xE0, + 0xA6, 0xBE, 0x01, 0x06, 0xE0, 0xA7, 0x87, 0xE0, + 0xA7, 0x97, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + // Bytes 2cc0 - 2cff + 0xAC, 0xBE, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + 0xAD, 0x96, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + 0xAD, 0x97, 0x01, 0x06, 0xE0, 0xAE, 0x92, 0xE0, + 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, + 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, + 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, 0x87, 0xE0, + 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xB2, 
0xBF, 0xE0, + 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, + // Bytes 2d00 - 2d3f + 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x96, 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, + 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, + 0xB5, 0x97, 0x01, 0x06, 0xE0, 0xB5, 0x87, 0xE0, + 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x9F, 0x01, 0x06, 0xE1, 0x80, 0xA5, 0xE1, + 0x80, 0xAE, 0x01, 0x06, 0xE1, 0xAC, 0x85, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x87, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x89, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x8B, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x8D, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x91, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBA, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBC, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBE, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBF, 0xE1, + // Bytes 2d80 - 2dbf + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAD, 0x82, 0xE1, + 0xAC, 0xB5, 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB1, + 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, + 0x84, 0xB2, 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, + 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, 0x8C, 0xBE, + 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, + 0x8D, 0x97, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, + 0xF0, 0x91, 0x92, 0xB0, 0x01, 0x08, 0xF0, 0x91, + // Bytes 2dc0 - 2dff + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBA, 0x01, 0x08, + 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBD, + 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB8, 0xF0, 0x91, + 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB9, + 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, + 0xA4, 0xB5, 0xF0, 0x91, 0xA4, 0xB0, 0x01, 0x09, + 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, 0xB3, + 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, + // Bytes 2e00 - 2e3f + 0x8F, 0xE0, 0xB7, 0x8A, 0x16, 0x44, 0x44, 0x5A, + 0xCC, 0x8C, 0xCD, 0x44, 0x44, 0x7A, 0xCC, 0x8C, + 0xCD, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB9, 0x46, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, 0x46, + // Bytes 2e40 - 2e7f + 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, 0x46, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, 0x46, + // Bytes 2e80 - 2ebf + 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, 0x49, + 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0x11, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, 0x01, + // Bytes 2ec0 - 2eff + 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, 0x4C, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0x11, 0x4C, 0xE3, 0x83, 0xA4, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x82, + 0x99, 0x11, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, 0x85, + 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, 0xE1, + 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, 0xE3, + // Bytes 2f00 - 2f3f + 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0x11, 0x4F, 
0xE3, 0x82, 0xB7, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, + 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x52, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x11, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x86, + 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, 0x86, + 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, 0x03, + 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, 0xB8, + // Bytes 2f80 - 2fbf + 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, 0x41, + 0xCC, 0x80, 0xCD, 0x03, 0x41, 0xCC, 0x81, 0xCD, + 0x03, 0x41, 0xCC, 0x83, 0xCD, 0x03, 0x41, 0xCC, + 0x84, 0xCD, 0x03, 0x41, 0xCC, 0x89, 0xCD, 0x03, + 0x41, 0xCC, 0x8C, 0xCD, 0x03, 0x41, 0xCC, 0x8F, + 0xCD, 0x03, 0x41, 0xCC, 0x91, 0xCD, 0x03, 0x41, + 0xCC, 0xA5, 0xB9, 0x03, 0x41, 0xCC, 0xA8, 0xA9, + 0x03, 0x42, 0xCC, 0x87, 0xCD, 0x03, 0x42, 0xCC, + // Bytes 2fc0 - 2fff + 0xA3, 0xB9, 0x03, 0x42, 0xCC, 0xB1, 0xB9, 0x03, + 0x43, 0xCC, 0x81, 0xCD, 0x03, 0x43, 0xCC, 0x82, + 0xCD, 0x03, 0x43, 0xCC, 0x87, 0xCD, 0x03, 0x43, + 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, 0x87, 0xCD, + 0x03, 0x44, 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, + 0xA3, 0xB9, 0x03, 0x44, 0xCC, 0xA7, 0xA9, 0x03, + 0x44, 0xCC, 0xAD, 0xB9, 0x03, 0x44, 0xCC, 0xB1, + 0xB9, 0x03, 0x45, 0xCC, 0x80, 0xCD, 0x03, 0x45, + // Bytes 3000 - 303f + 0xCC, 0x81, 0xCD, 0x03, 0x45, 0xCC, 0x83, 0xCD, + 0x03, 0x45, 0xCC, 0x86, 0xCD, 0x03, 0x45, 0xCC, + 0x87, 0xCD, 0x03, 0x45, 0xCC, 0x88, 0xCD, 0x03, + 0x45, 0xCC, 0x89, 0xCD, 0x03, 0x45, 0xCC, 0x8C, + 0xCD, 0x03, 0x45, 0xCC, 0x8F, 0xCD, 0x03, 0x45, + 0xCC, 0x91, 0xCD, 0x03, 0x45, 0xCC, 0xA8, 0xA9, + 0x03, 0x45, 0xCC, 0xAD, 0xB9, 0x03, 0x45, 0xCC, + 0xB0, 0xB9, 0x03, 0x46, 0xCC, 0x87, 0xCD, 0x03, + // Bytes 3040 - 307f + 0x47, 0xCC, 0x81, 0xCD, 0x03, 0x47, 0xCC, 0x82, + 0xCD, 0x03, 0x47, 0xCC, 0x84, 0xCD, 0x03, 0x47, + 0xCC, 0x86, 0xCD, 0x03, 0x47, 0xCC, 0x87, 0xCD, + 0x03, 0x47, 0xCC, 0x8C, 0xCD, 0x03, 0x47, 0xCC, + 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0x82, 0xCD, 0x03, + 0x48, 0xCC, 0x87, 0xCD, 0x03, 0x48, 0xCC, 0x88, + 0xCD, 0x03, 0x48, 0xCC, 0x8C, 0xCD, 0x03, 0x48, + 0xCC, 0xA3, 0xB9, 0x03, 0x48, 0xCC, 0xA7, 0xA9, + // Bytes 3080 - 30bf + 0x03, 0x48, 0xCC, 0xAE, 0xB9, 0x03, 0x49, 0xCC, + 0x80, 0xCD, 0x03, 0x49, 0xCC, 0x81, 0xCD, 0x03, + 0x49, 0xCC, 0x82, 0xCD, 0x03, 0x49, 0xCC, 0x83, + 0xCD, 0x03, 0x49, 0xCC, 0x84, 0xCD, 0x03, 0x49, + 0xCC, 0x86, 0xCD, 0x03, 0x49, 0xCC, 0x87, 0xCD, + 0x03, 0x49, 0xCC, 0x89, 0xCD, 0x03, 0x49, 0xCC, + 0x8C, 0xCD, 0x03, 0x49, 0xCC, 0x8F, 0xCD, 0x03, + 0x49, 0xCC, 0x91, 0xCD, 0x03, 0x49, 0xCC, 0xA3, + // Bytes 30c0 - 30ff + 0xB9, 0x03, 0x49, 0xCC, 0xA8, 0xA9, 0x03, 0x49, + 0xCC, 0xB0, 0xB9, 0x03, 0x4A, 0xCC, 0x82, 0xCD, + 0x03, 0x4B, 0xCC, 0x81, 0xCD, 0x03, 0x4B, 0xCC, + 0x8C, 0xCD, 0x03, 0x4B, 0xCC, 0xA3, 0xB9, 0x03, + 0x4B, 0xCC, 0xA7, 0xA9, 0x03, 0x4B, 0xCC, 0xB1, + 0xB9, 0x03, 0x4C, 0xCC, 0x81, 0xCD, 0x03, 0x4C, + 0xCC, 0x8C, 0xCD, 0x03, 0x4C, 0xCC, 0xA7, 0xA9, + 0x03, 0x4C, 0xCC, 0xAD, 0xB9, 0x03, 0x4C, 0xCC, + // Bytes 3100 - 313f + 0xB1, 0xB9, 0x03, 0x4D, 0xCC, 0x81, 0xCD, 0x03, + 0x4D, 0xCC, 0x87, 0xCD, 0x03, 0x4D, 0xCC, 0xA3, + 0xB9, 0x03, 0x4E, 0xCC, 0x80, 0xCD, 0x03, 0x4E, + 0xCC, 0x81, 0xCD, 0x03, 0x4E, 0xCC, 0x83, 0xCD, + 0x03, 0x4E, 0xCC, 0x87, 0xCD, 0x03, 0x4E, 0xCC, + 
0x8C, 0xCD, 0x03, 0x4E, 0xCC, 0xA3, 0xB9, 0x03, + 0x4E, 0xCC, 0xA7, 0xA9, 0x03, 0x4E, 0xCC, 0xAD, + 0xB9, 0x03, 0x4E, 0xCC, 0xB1, 0xB9, 0x03, 0x4F, + // Bytes 3140 - 317f + 0xCC, 0x80, 0xCD, 0x03, 0x4F, 0xCC, 0x81, 0xCD, + 0x03, 0x4F, 0xCC, 0x86, 0xCD, 0x03, 0x4F, 0xCC, + 0x89, 0xCD, 0x03, 0x4F, 0xCC, 0x8B, 0xCD, 0x03, + 0x4F, 0xCC, 0x8C, 0xCD, 0x03, 0x4F, 0xCC, 0x8F, + 0xCD, 0x03, 0x4F, 0xCC, 0x91, 0xCD, 0x03, 0x50, + 0xCC, 0x81, 0xCD, 0x03, 0x50, 0xCC, 0x87, 0xCD, + 0x03, 0x52, 0xCC, 0x81, 0xCD, 0x03, 0x52, 0xCC, + 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x8C, 0xCD, 0x03, + // Bytes 3180 - 31bf + 0x52, 0xCC, 0x8F, 0xCD, 0x03, 0x52, 0xCC, 0x91, + 0xCD, 0x03, 0x52, 0xCC, 0xA7, 0xA9, 0x03, 0x52, + 0xCC, 0xB1, 0xB9, 0x03, 0x53, 0xCC, 0x82, 0xCD, + 0x03, 0x53, 0xCC, 0x87, 0xCD, 0x03, 0x53, 0xCC, + 0xA6, 0xB9, 0x03, 0x53, 0xCC, 0xA7, 0xA9, 0x03, + 0x54, 0xCC, 0x87, 0xCD, 0x03, 0x54, 0xCC, 0x8C, + 0xCD, 0x03, 0x54, 0xCC, 0xA3, 0xB9, 0x03, 0x54, + 0xCC, 0xA6, 0xB9, 0x03, 0x54, 0xCC, 0xA7, 0xA9, + // Bytes 31c0 - 31ff + 0x03, 0x54, 0xCC, 0xAD, 0xB9, 0x03, 0x54, 0xCC, + 0xB1, 0xB9, 0x03, 0x55, 0xCC, 0x80, 0xCD, 0x03, + 0x55, 0xCC, 0x81, 0xCD, 0x03, 0x55, 0xCC, 0x82, + 0xCD, 0x03, 0x55, 0xCC, 0x86, 0xCD, 0x03, 0x55, + 0xCC, 0x89, 0xCD, 0x03, 0x55, 0xCC, 0x8A, 0xCD, + 0x03, 0x55, 0xCC, 0x8B, 0xCD, 0x03, 0x55, 0xCC, + 0x8C, 0xCD, 0x03, 0x55, 0xCC, 0x8F, 0xCD, 0x03, + 0x55, 0xCC, 0x91, 0xCD, 0x03, 0x55, 0xCC, 0xA3, + // Bytes 3200 - 323f + 0xB9, 0x03, 0x55, 0xCC, 0xA4, 0xB9, 0x03, 0x55, + 0xCC, 0xA8, 0xA9, 0x03, 0x55, 0xCC, 0xAD, 0xB9, + 0x03, 0x55, 0xCC, 0xB0, 0xB9, 0x03, 0x56, 0xCC, + 0x83, 0xCD, 0x03, 0x56, 0xCC, 0xA3, 0xB9, 0x03, + 0x57, 0xCC, 0x80, 0xCD, 0x03, 0x57, 0xCC, 0x81, + 0xCD, 0x03, 0x57, 0xCC, 0x82, 0xCD, 0x03, 0x57, + 0xCC, 0x87, 0xCD, 0x03, 0x57, 0xCC, 0x88, 0xCD, + 0x03, 0x57, 0xCC, 0xA3, 0xB9, 0x03, 0x58, 0xCC, + // Bytes 3240 - 327f + 0x87, 0xCD, 0x03, 0x58, 0xCC, 0x88, 0xCD, 0x03, + 0x59, 0xCC, 0x80, 0xCD, 0x03, 0x59, 0xCC, 0x81, + 0xCD, 0x03, 0x59, 0xCC, 0x82, 0xCD, 0x03, 0x59, + 0xCC, 0x83, 0xCD, 0x03, 0x59, 0xCC, 0x84, 0xCD, + 0x03, 0x59, 0xCC, 0x87, 0xCD, 0x03, 0x59, 0xCC, + 0x88, 0xCD, 0x03, 0x59, 0xCC, 0x89, 0xCD, 0x03, + 0x59, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, 0xCC, 0x81, + 0xCD, 0x03, 0x5A, 0xCC, 0x82, 0xCD, 0x03, 0x5A, + // Bytes 3280 - 32bf + 0xCC, 0x87, 0xCD, 0x03, 0x5A, 0xCC, 0x8C, 0xCD, + 0x03, 0x5A, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, 0xCC, + 0xB1, 0xB9, 0x03, 0x61, 0xCC, 0x80, 0xCD, 0x03, + 0x61, 0xCC, 0x81, 0xCD, 0x03, 0x61, 0xCC, 0x83, + 0xCD, 0x03, 0x61, 0xCC, 0x84, 0xCD, 0x03, 0x61, + 0xCC, 0x89, 0xCD, 0x03, 0x61, 0xCC, 0x8C, 0xCD, + 0x03, 0x61, 0xCC, 0x8F, 0xCD, 0x03, 0x61, 0xCC, + 0x91, 0xCD, 0x03, 0x61, 0xCC, 0xA5, 0xB9, 0x03, + // Bytes 32c0 - 32ff + 0x61, 0xCC, 0xA8, 0xA9, 0x03, 0x62, 0xCC, 0x87, + 0xCD, 0x03, 0x62, 0xCC, 0xA3, 0xB9, 0x03, 0x62, + 0xCC, 0xB1, 0xB9, 0x03, 0x63, 0xCC, 0x81, 0xCD, + 0x03, 0x63, 0xCC, 0x82, 0xCD, 0x03, 0x63, 0xCC, + 0x87, 0xCD, 0x03, 0x63, 0xCC, 0x8C, 0xCD, 0x03, + 0x64, 0xCC, 0x87, 0xCD, 0x03, 0x64, 0xCC, 0x8C, + 0xCD, 0x03, 0x64, 0xCC, 0xA3, 0xB9, 0x03, 0x64, + 0xCC, 0xA7, 0xA9, 0x03, 0x64, 0xCC, 0xAD, 0xB9, + // Bytes 3300 - 333f + 0x03, 0x64, 0xCC, 0xB1, 0xB9, 0x03, 0x65, 0xCC, + 0x80, 0xCD, 0x03, 0x65, 0xCC, 0x81, 0xCD, 0x03, + 0x65, 0xCC, 0x83, 0xCD, 0x03, 0x65, 0xCC, 0x86, + 0xCD, 0x03, 0x65, 0xCC, 0x87, 0xCD, 0x03, 0x65, + 0xCC, 0x88, 0xCD, 0x03, 0x65, 0xCC, 0x89, 0xCD, + 0x03, 0x65, 0xCC, 0x8C, 0xCD, 0x03, 0x65, 0xCC, + 0x8F, 0xCD, 0x03, 0x65, 0xCC, 0x91, 0xCD, 0x03, + 0x65, 0xCC, 0xA8, 0xA9, 0x03, 0x65, 0xCC, 0xAD, + // Bytes 3340 - 337f 
+ 0xB9, 0x03, 0x65, 0xCC, 0xB0, 0xB9, 0x03, 0x66, + 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, 0x81, 0xCD, + 0x03, 0x67, 0xCC, 0x82, 0xCD, 0x03, 0x67, 0xCC, + 0x84, 0xCD, 0x03, 0x67, 0xCC, 0x86, 0xCD, 0x03, + 0x67, 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, 0x8C, + 0xCD, 0x03, 0x67, 0xCC, 0xA7, 0xA9, 0x03, 0x68, + 0xCC, 0x82, 0xCD, 0x03, 0x68, 0xCC, 0x87, 0xCD, + 0x03, 0x68, 0xCC, 0x88, 0xCD, 0x03, 0x68, 0xCC, + // Bytes 3380 - 33bf + 0x8C, 0xCD, 0x03, 0x68, 0xCC, 0xA3, 0xB9, 0x03, + 0x68, 0xCC, 0xA7, 0xA9, 0x03, 0x68, 0xCC, 0xAE, + 0xB9, 0x03, 0x68, 0xCC, 0xB1, 0xB9, 0x03, 0x69, + 0xCC, 0x80, 0xCD, 0x03, 0x69, 0xCC, 0x81, 0xCD, + 0x03, 0x69, 0xCC, 0x82, 0xCD, 0x03, 0x69, 0xCC, + 0x83, 0xCD, 0x03, 0x69, 0xCC, 0x84, 0xCD, 0x03, + 0x69, 0xCC, 0x86, 0xCD, 0x03, 0x69, 0xCC, 0x89, + 0xCD, 0x03, 0x69, 0xCC, 0x8C, 0xCD, 0x03, 0x69, + // Bytes 33c0 - 33ff + 0xCC, 0x8F, 0xCD, 0x03, 0x69, 0xCC, 0x91, 0xCD, + 0x03, 0x69, 0xCC, 0xA3, 0xB9, 0x03, 0x69, 0xCC, + 0xA8, 0xA9, 0x03, 0x69, 0xCC, 0xB0, 0xB9, 0x03, + 0x6A, 0xCC, 0x82, 0xCD, 0x03, 0x6A, 0xCC, 0x8C, + 0xCD, 0x03, 0x6B, 0xCC, 0x81, 0xCD, 0x03, 0x6B, + 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, 0xA3, 0xB9, + 0x03, 0x6B, 0xCC, 0xA7, 0xA9, 0x03, 0x6B, 0xCC, + 0xB1, 0xB9, 0x03, 0x6C, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3400 - 343f + 0x6C, 0xCC, 0x8C, 0xCD, 0x03, 0x6C, 0xCC, 0xA7, + 0xA9, 0x03, 0x6C, 0xCC, 0xAD, 0xB9, 0x03, 0x6C, + 0xCC, 0xB1, 0xB9, 0x03, 0x6D, 0xCC, 0x81, 0xCD, + 0x03, 0x6D, 0xCC, 0x87, 0xCD, 0x03, 0x6D, 0xCC, + 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0x80, 0xCD, 0x03, + 0x6E, 0xCC, 0x81, 0xCD, 0x03, 0x6E, 0xCC, 0x83, + 0xCD, 0x03, 0x6E, 0xCC, 0x87, 0xCD, 0x03, 0x6E, + 0xCC, 0x8C, 0xCD, 0x03, 0x6E, 0xCC, 0xA3, 0xB9, + // Bytes 3440 - 347f + 0x03, 0x6E, 0xCC, 0xA7, 0xA9, 0x03, 0x6E, 0xCC, + 0xAD, 0xB9, 0x03, 0x6E, 0xCC, 0xB1, 0xB9, 0x03, + 0x6F, 0xCC, 0x80, 0xCD, 0x03, 0x6F, 0xCC, 0x81, + 0xCD, 0x03, 0x6F, 0xCC, 0x86, 0xCD, 0x03, 0x6F, + 0xCC, 0x89, 0xCD, 0x03, 0x6F, 0xCC, 0x8B, 0xCD, + 0x03, 0x6F, 0xCC, 0x8C, 0xCD, 0x03, 0x6F, 0xCC, + 0x8F, 0xCD, 0x03, 0x6F, 0xCC, 0x91, 0xCD, 0x03, + 0x70, 0xCC, 0x81, 0xCD, 0x03, 0x70, 0xCC, 0x87, + // Bytes 3480 - 34bf + 0xCD, 0x03, 0x72, 0xCC, 0x81, 0xCD, 0x03, 0x72, + 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, 0x8C, 0xCD, + 0x03, 0x72, 0xCC, 0x8F, 0xCD, 0x03, 0x72, 0xCC, + 0x91, 0xCD, 0x03, 0x72, 0xCC, 0xA7, 0xA9, 0x03, + 0x72, 0xCC, 0xB1, 0xB9, 0x03, 0x73, 0xCC, 0x82, + 0xCD, 0x03, 0x73, 0xCC, 0x87, 0xCD, 0x03, 0x73, + 0xCC, 0xA6, 0xB9, 0x03, 0x73, 0xCC, 0xA7, 0xA9, + 0x03, 0x74, 0xCC, 0x87, 0xCD, 0x03, 0x74, 0xCC, + // Bytes 34c0 - 34ff + 0x88, 0xCD, 0x03, 0x74, 0xCC, 0x8C, 0xCD, 0x03, + 0x74, 0xCC, 0xA3, 0xB9, 0x03, 0x74, 0xCC, 0xA6, + 0xB9, 0x03, 0x74, 0xCC, 0xA7, 0xA9, 0x03, 0x74, + 0xCC, 0xAD, 0xB9, 0x03, 0x74, 0xCC, 0xB1, 0xB9, + 0x03, 0x75, 0xCC, 0x80, 0xCD, 0x03, 0x75, 0xCC, + 0x81, 0xCD, 0x03, 0x75, 0xCC, 0x82, 0xCD, 0x03, + 0x75, 0xCC, 0x86, 0xCD, 0x03, 0x75, 0xCC, 0x89, + 0xCD, 0x03, 0x75, 0xCC, 0x8A, 0xCD, 0x03, 0x75, + // Bytes 3500 - 353f + 0xCC, 0x8B, 0xCD, 0x03, 0x75, 0xCC, 0x8C, 0xCD, + 0x03, 0x75, 0xCC, 0x8F, 0xCD, 0x03, 0x75, 0xCC, + 0x91, 0xCD, 0x03, 0x75, 0xCC, 0xA3, 0xB9, 0x03, + 0x75, 0xCC, 0xA4, 0xB9, 0x03, 0x75, 0xCC, 0xA8, + 0xA9, 0x03, 0x75, 0xCC, 0xAD, 0xB9, 0x03, 0x75, + 0xCC, 0xB0, 0xB9, 0x03, 0x76, 0xCC, 0x83, 0xCD, + 0x03, 0x76, 0xCC, 0xA3, 0xB9, 0x03, 0x77, 0xCC, + 0x80, 0xCD, 0x03, 0x77, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3540 - 357f + 0x77, 0xCC, 0x82, 0xCD, 0x03, 0x77, 0xCC, 0x87, + 0xCD, 0x03, 0x77, 0xCC, 0x88, 0xCD, 0x03, 0x77, + 0xCC, 0x8A, 0xCD, 0x03, 0x77, 0xCC, 0xA3, 0xB9, + 0x03, 0x78, 0xCC, 
0x87, 0xCD, 0x03, 0x78, 0xCC, + 0x88, 0xCD, 0x03, 0x79, 0xCC, 0x80, 0xCD, 0x03, + 0x79, 0xCC, 0x81, 0xCD, 0x03, 0x79, 0xCC, 0x82, + 0xCD, 0x03, 0x79, 0xCC, 0x83, 0xCD, 0x03, 0x79, + 0xCC, 0x84, 0xCD, 0x03, 0x79, 0xCC, 0x87, 0xCD, + // Bytes 3580 - 35bf + 0x03, 0x79, 0xCC, 0x88, 0xCD, 0x03, 0x79, 0xCC, + 0x89, 0xCD, 0x03, 0x79, 0xCC, 0x8A, 0xCD, 0x03, + 0x79, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, 0xCC, 0x81, + 0xCD, 0x03, 0x7A, 0xCC, 0x82, 0xCD, 0x03, 0x7A, + 0xCC, 0x87, 0xCD, 0x03, 0x7A, 0xCC, 0x8C, 0xCD, + 0x03, 0x7A, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, 0xCC, + 0xB1, 0xB9, 0x04, 0xC2, 0xA8, 0xCC, 0x80, 0xCE, + 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x04, 0xC2, + // Bytes 35c0 - 35ff + 0xA8, 0xCD, 0x82, 0xCE, 0x04, 0xC3, 0x86, 0xCC, + 0x81, 0xCD, 0x04, 0xC3, 0x86, 0xCC, 0x84, 0xCD, + 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xCD, 0x04, 0xC3, + 0xA6, 0xCC, 0x81, 0xCD, 0x04, 0xC3, 0xA6, 0xCC, + 0x84, 0xCD, 0x04, 0xC3, 0xB8, 0xCC, 0x81, 0xCD, + 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xCD, 0x04, 0xC6, + 0xB7, 0xCC, 0x8C, 0xCD, 0x04, 0xCA, 0x92, 0xCC, + 0x8C, 0xCD, 0x04, 0xCE, 0x91, 0xCC, 0x80, 0xCD, + // Bytes 3600 - 363f + 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0x91, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0x91, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0x91, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0x95, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0x97, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x97, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xDD, 0x04, 0xCE, + 0x99, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x99, 0xCC, + // Bytes 3640 - 367f + 0x81, 0xCD, 0x04, 0xCE, 0x99, 0xCC, 0x84, 0xCD, + 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xCD, 0x04, 0xCE, + 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xCD, 0x04, 0xCE, + 0xA5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, 0x84, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xCD, 0x04, 0xCE, + // Bytes 3680 - 36bf + 0xA5, 0xCC, 0x88, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xDD, 0x04, 0xCE, + 0xB1, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0xB1, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0xB1, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xB5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0xB7, 0xCD, + 0x85, 0xDD, 0x04, 0xCE, 0xB9, 0xCC, 0x80, 0xCD, + // Bytes 36c0 - 36ff + 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0xB9, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0xB9, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0xB9, 0xCD, 0x82, 0xCD, + 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xBF, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x81, 0xCC, + 0x93, 0xCD, 0x04, 0xCF, 0x81, 0xCC, 0x94, 0xCD, + 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xCD, 0x04, 0xCF, + 0x85, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x85, 0xCC, + // Bytes 3700 - 373f + 0x84, 0xCD, 0x04, 0xCF, 0x85, 0xCC, 0x86, 0xCD, + 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xCD, 0x04, 0xCF, + 0x89, 0xCD, 0x85, 0xDD, 0x04, 0xCF, 0x92, 0xCC, + 0x81, 0xCD, 0x04, 0xCF, 0x92, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x90, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0x90, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x93, 0xCC, 0x81, 0xCD, + 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xCD, 0x04, 0xD0, + // Bytes 3740 - 377f + 0x95, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0x95, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x96, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x97, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x98, 0xCC, + 0x80, 0xCD, 0x04, 0xD0, 0x98, 0xCC, 0x84, 0xCD, + 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 
0x9A, 0xCC, + 0x81, 0xCD, 0x04, 0xD0, 0x9E, 0xCC, 0x88, 0xCD, + // Bytes 3780 - 37bf + 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xCD, 0x04, 0xD0, + 0xA3, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, 0xCD, + 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xAB, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xAD, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xB3, 0xCC, 0x81, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, + // Bytes 37c0 - 37ff + 0x80, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xB6, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xB6, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB7, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xCD, 0x04, 0xD0, + 0xB8, 0xCC, 0x84, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xCD, 0x04, 0xD0, + // Bytes 3800 - 383f + 0xBE, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0x83, 0xCC, + 0x84, 0xCD, 0x04, 0xD1, 0x83, 0xCC, 0x86, 0xCD, + 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x83, 0xCC, 0x8B, 0xCD, 0x04, 0xD1, 0x87, 0xCC, + 0x88, 0xCD, 0x04, 0xD1, 0x8B, 0xCC, 0x88, 0xCD, + 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0xB4, 0xCC, + 0x8F, 0xCD, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, 0xCD, + // Bytes 3840 - 387f + 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD3, + 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xD3, 0xA8, 0xCC, + 0x88, 0xCD, 0x04, 0xD3, 0xA9, 0xCC, 0x88, 0xCD, + 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x04, 0xD8, + 0xA7, 0xD9, 0x94, 0xCD, 0x04, 0xD8, 0xA7, 0xD9, + 0x95, 0xB9, 0x04, 0xD9, 0x88, 0xD9, 0x94, 0xCD, + 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, 0x04, 0xDB, + 0x81, 0xD9, 0x94, 0xCD, 0x04, 0xDB, 0x92, 0xD9, + // Bytes 3880 - 38bf + 0x94, 0xCD, 0x04, 0xDB, 0x95, 0xD9, 0x94, 0xCD, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, + 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x41, 0xCC, + 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x80, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x81, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x83, + 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, 0xCE, + // Bytes 38c0 - 38ff + 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, + 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x41, + 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x41, 0xCC, 0xA3, + 0xCC, 0x86, 0xCE, 0x05, 0x43, 0xCC, 0xA7, 0xCC, + 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + // Bytes 3900 - 393f + 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x45, 0xCC, + 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x45, 0xCC, 0xA3, + 0xCC, 0x82, 0xCE, 0x05, 0x45, 0xCC, 0xA7, 0xCC, + 0x86, 0xCE, 0x05, 0x49, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x4F, + // Bytes 3940 - 397f + 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x4F, 0xCC, + 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x4F, 0xCC, 0x83, + 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x83, 0xCC, 0x88, + 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, 0xCE, + 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, + 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x4F, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x4F, 0xCC, + // Bytes 3980 - 39bf + 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x81, 0xCE, 0x05, 0x4F, 
0xCC, 0x9B, 0xCC, + 0x83, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, + 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, + 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, 0x52, + 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x53, 0xCC, + 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, 0x8C, + // Bytes 39c0 - 39ff + 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, 0xA3, 0xCC, + 0x87, 0xCE, 0x05, 0x55, 0xCC, 0x83, 0xCC, 0x81, + 0xCE, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, 0xCE, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x55, 0xCC, + 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x80, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + // Bytes 3a00 - 3a3f + 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x83, + 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, + 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x61, 0xCC, 0x82, + 0xCC, 0x89, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x80, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x81, + // Bytes 3a40 - 3a7f + 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, 0xCE, + 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCE, 0x05, + 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x61, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x61, 0xCC, + 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x82, 0xCE, 0x05, 0x61, 0xCC, 0xA3, 0xCC, + 0x86, 0xCE, 0x05, 0x63, 0xCC, 0xA7, 0xCC, 0x81, + 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + // Bytes 3a80 - 3abf + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x65, + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x65, 0xCC, 0x84, + 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, 0xA3, 0xCC, + 0x82, 0xCE, 0x05, 0x65, 0xCC, 0xA7, 0xCC, 0x86, + 0xCE, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, + // Bytes 3ac0 - 3aff + 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, + 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x6F, 0xCC, 0x82, + 0xCC, 0x89, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x84, + 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, 0xCE, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, + 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x6F, + // Bytes 3b00 - 3b3f + 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, + 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0x80, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x83, + 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, + 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, + 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x6F, + 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, 0x72, 0xCC, + // Bytes 3b40 - 3b7f + 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x73, 0xCC, 0x81, + 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, 0x8C, 0xCC, + 0x87, 0xCE, 0x05, 0x73, 0xCC, 0xA3, 0xCC, 0x87, + 0xCE, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, 0xCE, + 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCE, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x75, 0xCC, 0x88, + // Bytes 3b80 - 3bbf + 0xCC, 0x8C, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x80, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x81, + 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, 0xCE, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, 0x05, + 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, 0xE1, + 
0xBE, 0xBF, 0xCC, 0x80, 0xCE, 0x05, 0xE1, 0xBE, + 0xBF, 0xCC, 0x81, 0xCE, 0x05, 0xE1, 0xBE, 0xBF, + 0xCD, 0x82, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + // Bytes 3bc0 - 3bff + 0x80, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, 0x81, + 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, 0xCE, + 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x92, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, 0xB8, + // Bytes 3c00 - 3c3f + 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, + 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, 0x05, + // Bytes 3c40 - 3c7f + 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB3, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c80 - 3cbf + 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3cc0 - 3cff + 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xAB, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, 0x06, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3d00 - 3d3f + 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3d40 - 3d7f + 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3d80 - 3dbf + 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3dc0 - 3dff 
+ 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3e00 - 3e3f + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3e40 - 3e7f + 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3e80 - 3ebf + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3ec0 - 3eff + 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + // Bytes 3f00 - 3f3f + 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x89, 0x06, + 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x15, 0x06, + 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3f40 - 3f7f + 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3f80 - 3fbf + 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3fc0 - 3fff + 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xBB, 
0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 4000 - 403f + 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 4040 - 407f + 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x11, 0x06, + // Bytes 4080 - 40bf + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x11, 0x06, + // Bytes 40c0 - 40ff + 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x11, 0x08, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, + // Bytes 4100 - 413f + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 4140 - 417f + 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, + // Bytes 4180 - 41bf + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 41c0 - 41ff + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 
0xCE, 0xB7, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + // Bytes 4200 - 423f + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, + // Bytes 4240 - 427f + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, 0xBA, + 0x0D, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, 0x91, + 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, 0x82, 0xA5, + 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x42, 0xC2, 0xB4, + 0x01, 0x43, 0x20, 0xCC, 0x81, 0xCD, 0x43, 0x20, + 0xCC, 0x83, 0xCD, 0x43, 0x20, 0xCC, 0x84, 0xCD, + // Bytes 4280 - 42bf + 0x43, 0x20, 0xCC, 0x85, 0xCD, 0x43, 0x20, 0xCC, + 0x86, 0xCD, 0x43, 0x20, 0xCC, 0x87, 0xCD, 0x43, + 0x20, 0xCC, 0x88, 0xCD, 0x43, 0x20, 0xCC, 0x8A, + 0xCD, 0x43, 0x20, 0xCC, 0x8B, 0xCD, 0x43, 0x20, + 0xCC, 0x93, 0xCD, 0x43, 0x20, 0xCC, 0x94, 0xCD, + 0x43, 0x20, 0xCC, 0xA7, 0xA9, 0x43, 0x20, 0xCC, + 0xA8, 0xA9, 0x43, 0x20, 0xCC, 0xB3, 0xB9, 0x43, + 0x20, 0xCD, 0x82, 0xCD, 0x43, 0x20, 0xCD, 0x85, + // Bytes 42c0 - 42ff + 0xDD, 0x43, 0x20, 0xD9, 0x8B, 0x5D, 0x43, 0x20, + 0xD9, 0x8C, 0x61, 0x43, 0x20, 0xD9, 0x8D, 0x65, + 0x43, 0x20, 0xD9, 0x8E, 0x69, 0x43, 0x20, 0xD9, + 0x8F, 0x6D, 0x43, 0x20, 0xD9, 0x90, 0x71, 0x43, + 0x20, 0xD9, 0x91, 0x75, 0x43, 0x20, 0xD9, 0x92, + 0x79, 0x43, 0x41, 0xCC, 0x8A, 0xCD, 0x43, 0x73, + 0xCC, 0x87, 0xCD, 0x44, 0x20, 0xE3, 0x82, 0x99, + 0x11, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x11, 0x44, + // Bytes 4300 - 433f + 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x44, 0xCE, 0x91, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x95, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0x99, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x9F, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xCD, 0x44, + 0xCE, 0xA9, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB1, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB5, 0xCC, 0x81, + // Bytes 4340 - 437f + 0xCD, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xBF, + 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x85, 0xCC, 0x81, + 0xCD, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x44, + 0xD7, 0x90, 0xD6, 0xB7, 0x35, 0x44, 0xD7, 0x90, + 0xD6, 0xB8, 0x39, 0x44, 0xD7, 0x90, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x91, 0xD6, 0xBF, 0x4D, 0x44, 0xD7, 0x92, + // Bytes 4380 - 43bf + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x93, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x95, 0xD6, 0xB9, 0x3D, 0x44, 0xD7, 0x95, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x96, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x99, 0xD6, 0xB4, 0x29, 0x44, 0xD7, 0x99, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9A, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x45, 0x44, + // Bytes 43c0 - 43ff + 0xD7, 0x9B, 0xD6, 0xBF, 0x4D, 0x44, 0xD7, 0x9C, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9E, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA1, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA3, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0xA6, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA7, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA8, 0xD6, 0xBC, + // Bytes 4400 - 443f + 0x45, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA9, 0xD7, 0x81, 0x51, 
0x44, 0xD7, 0xA9, + 0xD7, 0x82, 0x55, 0x44, 0xD7, 0xAA, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x35, 0x44, + 0xD8, 0xA7, 0xD9, 0x8B, 0x5D, 0x44, 0xD8, 0xA7, + 0xD9, 0x93, 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x94, + 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB9, 0x44, + 0xD8, 0xB0, 0xD9, 0xB0, 0x7D, 0x44, 0xD8, 0xB1, + // Bytes 4440 - 447f + 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x80, 0xD9, 0x8B, + 0x5D, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x69, 0x44, + 0xD9, 0x80, 0xD9, 0x8F, 0x6D, 0x44, 0xD9, 0x80, + 0xD9, 0x90, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x91, + 0x75, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x79, 0x44, + 0xD9, 0x87, 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x88, + 0xD9, 0x94, 0xCD, 0x44, 0xD9, 0x89, 0xD9, 0xB0, + 0x7D, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, 0x44, + // Bytes 4480 - 44bf + 0xDB, 0x92, 0xD9, 0x94, 0xCD, 0x44, 0xDB, 0x95, + 0xD9, 0x94, 0xCD, 0x45, 0x20, 0xCC, 0x88, 0xCC, + 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, 0xCE, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x45, + 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x45, 0x20, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x94, + // Bytes 44c0 - 44ff + 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, 0x94, 0xCD, + 0x82, 0xCE, 0x45, 0x20, 0xD9, 0x8C, 0xD9, 0x91, + 0x76, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, 0x76, + 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x76, 0x45, + 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x76, 0x45, 0x20, + 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x45, 0x20, 0xD9, + 0x91, 0xD9, 0xB0, 0x7E, 0x45, 0xE2, 0xAB, 0x9D, + 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, 0x88, + // Bytes 4500 - 453f + 0xCC, 0x81, 0xCE, 0x46, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x81, 0xCE, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, + 0xD7, 0x81, 0x52, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, + 0xD7, 0x82, 0x56, 0x46, 0xD9, 0x80, 0xD9, 0x8E, + 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, 0xD9, 0x8F, + 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, 0xD9, 0x90, + 0xD9, 0x91, 0x76, 0x46, 0xE0, 0xA4, 0x95, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x96, 0xE0, + // Bytes 4540 - 457f + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x97, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x9C, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xA1, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xA2, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xAB, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xAF, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xA1, 0xE0, + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xA2, 0xE0, + // Bytes 4580 - 45bf + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xAF, 0xE0, + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x96, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x97, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x9C, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xAB, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xB2, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xB8, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, 0xA1, 0xE0, + // Bytes 45c0 - 45ff + 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, 0xA2, 0xE0, + 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xBE, 0xB2, 0xE0, + 0xBE, 0x80, 0xA1, 0x46, 0xE0, 0xBE, 0xB3, 0xE0, + 0xBE, 0x80, 0xA1, 0x46, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0x11, 0x48, 0xF0, 0x9D, 0x85, 0x97, + 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + // Bytes 4600 - 463f + 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, + 0x85, 0xA5, 0xB1, 0x49, 0xE0, 0xBE, 0xB2, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x49, 0xE0, + 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, + 0xA2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 
0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, + // Bytes 4640 - 467f + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xB0, 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB1, + 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xB2, 0x4C, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, 0xF0, 0x9D, + 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + // Bytes 4680 - 46bf + 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x83, + 0x41, 0xCC, 0x82, 0xCD, 0x83, 0x41, 0xCC, 0x86, + 0xCD, 0x83, 0x41, 0xCC, 0x87, 0xCD, 0x83, 0x41, + 0xCC, 0x88, 0xCD, 0x83, 0x41, 0xCC, 0x8A, 0xCD, + 0x83, 0x41, 0xCC, 0xA3, 0xB9, 0x83, 0x43, 0xCC, + // Bytes 46c0 - 46ff + 0xA7, 0xA9, 0x83, 0x45, 0xCC, 0x82, 0xCD, 0x83, + 0x45, 0xCC, 0x84, 0xCD, 0x83, 0x45, 0xCC, 0xA3, + 0xB9, 0x83, 0x45, 0xCC, 0xA7, 0xA9, 0x83, 0x49, + 0xCC, 0x88, 0xCD, 0x83, 0x4C, 0xCC, 0xA3, 0xB9, + 0x83, 0x4F, 0xCC, 0x82, 0xCD, 0x83, 0x4F, 0xCC, + 0x83, 0xCD, 0x83, 0x4F, 0xCC, 0x84, 0xCD, 0x83, + 0x4F, 0xCC, 0x87, 0xCD, 0x83, 0x4F, 0xCC, 0x88, + 0xCD, 0x83, 0x4F, 0xCC, 0x9B, 0xB1, 0x83, 0x4F, + // Bytes 4700 - 473f + 0xCC, 0xA3, 0xB9, 0x83, 0x4F, 0xCC, 0xA8, 0xA9, + 0x83, 0x52, 0xCC, 0xA3, 0xB9, 0x83, 0x53, 0xCC, + 0x81, 0xCD, 0x83, 0x53, 0xCC, 0x8C, 0xCD, 0x83, + 0x53, 0xCC, 0xA3, 0xB9, 0x83, 0x55, 0xCC, 0x83, + 0xCD, 0x83, 0x55, 0xCC, 0x84, 0xCD, 0x83, 0x55, + 0xCC, 0x88, 0xCD, 0x83, 0x55, 0xCC, 0x9B, 0xB1, + 0x83, 0x61, 0xCC, 0x82, 0xCD, 0x83, 0x61, 0xCC, + 0x86, 0xCD, 0x83, 0x61, 0xCC, 0x87, 0xCD, 0x83, + // Bytes 4740 - 477f + 0x61, 0xCC, 0x88, 0xCD, 0x83, 0x61, 0xCC, 0x8A, + 0xCD, 0x83, 0x61, 0xCC, 0xA3, 0xB9, 0x83, 0x63, + 0xCC, 0xA7, 0xA9, 0x83, 0x65, 0xCC, 0x82, 0xCD, + 0x83, 0x65, 0xCC, 0x84, 0xCD, 0x83, 0x65, 0xCC, + 0xA3, 0xB9, 0x83, 0x65, 0xCC, 0xA7, 0xA9, 0x83, + 0x69, 0xCC, 0x88, 0xCD, 0x83, 0x6C, 0xCC, 0xA3, + 0xB9, 0x83, 0x6F, 0xCC, 0x82, 0xCD, 0x83, 0x6F, + 0xCC, 0x83, 0xCD, 0x83, 0x6F, 0xCC, 0x84, 0xCD, + // Bytes 4780 - 47bf + 0x83, 0x6F, 0xCC, 0x87, 0xCD, 0x83, 0x6F, 0xCC, + 0x88, 0xCD, 0x83, 0x6F, 0xCC, 0x9B, 0xB1, 0x83, + 0x6F, 0xCC, 0xA3, 0xB9, 0x83, 0x6F, 0xCC, 0xA8, + 0xA9, 0x83, 0x72, 0xCC, 0xA3, 0xB9, 0x83, 0x73, + 0xCC, 0x81, 0xCD, 0x83, 0x73, 0xCC, 0x8C, 0xCD, + 0x83, 0x73, 0xCC, 0xA3, 0xB9, 0x83, 0x75, 0xCC, + 0x83, 0xCD, 0x83, 0x75, 0xCC, 0x84, 0xCD, 0x83, + 0x75, 0xCC, 0x88, 0xCD, 0x83, 0x75, 0xCC, 0x9B, + // Bytes 47c0 - 47ff + 0xB1, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x95, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x95, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x99, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x99, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x9F, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA5, + // Bytes 4800 - 483f + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x84, 0xCE, 0xB1, + 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x84, 0xCE, 0xB5, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB5, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x84, + // Bytes 4840 - 487f 
+ 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x84, + 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x84, 0xCE, 0xB9, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB9, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0xBF, 0xCC, 0x94, 0xCD, 0x84, 0xCF, 0x85, + 0xCC, 0x88, 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x93, + // Bytes 4880 - 48bf + 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x84, 0xCF, 0x89, + 0xCC, 0x81, 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x91, + // Bytes 48c0 - 48ff + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x97, + // Bytes 4900 - 493f + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB1, + // Bytes 4940 - 497f + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB7, + // Bytes 4980 - 49bf + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCF, 0x89, + // Bytes 49c0 - 49ff + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x42, 0xCC, 0x80, + 0xCD, 0x33, 0x42, 0xCC, 0x81, 0xCD, 0x33, 0x42, + 0xCC, 0x93, 0xCD, 0x33, 0x43, 0xE1, 0x85, 0xA1, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA5, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, 0xE1, + // Bytes 4a00 - 4a3f + 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA9, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAD, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB1, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, 0x00, + // Bytes 4a40 - 4a7f + 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB5, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, 0x00, + 0x43, 0xE1, 0x86, 
0xAC, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB0, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB4, + // Bytes 4a80 - 4abf + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, 0x00, + 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x33, 0x43, + 0xE3, 0x82, 0x99, 0x11, 0x04, 0x43, 0xE3, 0x82, + 0x9A, 0x11, 0x04, 0x46, 0xE0, 0xBD, 0xB1, 0xE0, + 0xBD, 0xB2, 0xA2, 0x27, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB4, 0xA6, 0x27, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x27, 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10680 bytes (10.43 KiB). Checksum: a555db76d4becdd2. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. 
+var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f86, 0xc1: 0x2f8b, 0xc2: 0x469f, 0xc3: 0x2f90, 0xc4: 0x46ae, 0xc5: 0x46b3, + 0xc6: 0xa000, 0xc7: 0x46bd, 0xc8: 0x2ff9, 0xc9: 0x2ffe, 0xca: 0x46c2, 0xcb: 0x3012, + 0xcc: 0x3085, 0xcd: 0x308a, 0xce: 0x308f, 0xcf: 0x46d6, 0xd1: 0x311b, + 0xd2: 0x313e, 0xd3: 0x3143, 0xd4: 0x46e0, 0xd5: 0x46e5, 0xd6: 0x46f4, + 0xd8: 0xa000, 0xd9: 0x31ca, 0xda: 0x31cf, 0xdb: 0x31d4, 0xdc: 0x4726, 0xdd: 0x324c, + 0xe0: 0x3292, 0xe1: 0x3297, 0xe2: 0x4730, 0xe3: 0x329c, + 0xe4: 0x473f, 0xe5: 0x4744, 0xe6: 0xa000, 0xe7: 0x474e, 0xe8: 0x3305, 0xe9: 0x330a, + 0xea: 0x4753, 0xeb: 0x331e, 0xec: 0x3396, 0xed: 0x339b, 0xee: 0x33a0, 0xef: 0x4767, + 0xf1: 0x342c, 0xf2: 0x344f, 0xf3: 0x3454, 0xf4: 0x4771, 0xf5: 0x4776, + 0xf6: 0x4785, 0xf8: 0xa000, 0xf9: 0x34e0, 0xfa: 0x34e5, 0xfb: 0x34ea, + 0xfc: 0x47b7, 0xfd: 0x3567, 0xff: 0x3580, + // Block 0x4, offset 0x100 + 0x100: 0x2f95, 0x101: 0x32a1, 0x102: 0x46a4, 0x103: 0x4735, 0x104: 0x2fb3, 0x105: 0x32bf, + 0x106: 0x2fc7, 0x107: 0x32d3, 0x108: 0x2fcc, 0x109: 0x32d8, 0x10a: 0x2fd1, 0x10b: 0x32dd, + 0x10c: 0x2fd6, 0x10d: 0x32e2, 0x10e: 0x2fe0, 0x10f: 0x32ec, + 0x112: 0x46c7, 0x113: 0x4758, 0x114: 0x3008, 0x115: 0x3314, 0x116: 0x300d, 0x117: 0x3319, + 0x118: 0x302b, 0x119: 0x3337, 0x11a: 0x301c, 0x11b: 0x3328, 0x11c: 0x3044, 0x11d: 0x3350, + 0x11e: 0x304e, 0x11f: 0x335a, 0x120: 0x3053, 0x121: 0x335f, 0x122: 0x305d, 0x123: 0x3369, + 0x124: 0x3062, 0x125: 0x336e, 0x128: 0x3094, 0x129: 0x33a5, + 0x12a: 0x3099, 0x12b: 0x33aa, 0x12c: 0x309e, 0x12d: 0x33af, 0x12e: 0x30c1, 0x12f: 0x33cd, + 0x130: 0x30a3, 0x134: 0x30cb, 0x135: 0x33d7, + 0x136: 0x30df, 0x137: 0x33f0, 0x139: 0x30e9, 0x13a: 0x33fa, 0x13b: 0x30f3, + 0x13c: 0x3404, 0x13d: 0x30ee, 0x13e: 0x33ff, + // Block 0x5, offset 0x140 + 0x143: 0x3116, 0x144: 0x3427, 0x145: 0x312f, + 0x146: 0x3440, 0x147: 0x3125, 0x148: 0x3436, + 0x14c: 0x46ea, 0x14d: 0x477b, 0x14e: 0x3148, 0x14f: 0x3459, 0x150: 0x3152, 0x151: 0x3463, + 0x154: 0x3170, 0x155: 0x3481, 0x156: 0x3189, 0x157: 0x349a, + 0x158: 0x317a, 0x159: 0x348b, 0x15a: 0x470d, 0x15b: 0x479e, 0x15c: 0x3193, 0x15d: 0x34a4, + 0x15e: 0x31a2, 0x15f: 0x34b3, 0x160: 0x4712, 0x161: 0x47a3, 0x162: 0x31bb, 0x163: 0x34d1, + 0x164: 0x31ac, 0x165: 0x34c2, 0x168: 0x471c, 0x169: 0x47ad, + 0x16a: 0x4721, 0x16b: 0x47b2, 0x16c: 0x31d9, 0x16d: 0x34ef, 0x16e: 0x31e3, 0x16f: 0x34f9, + 0x170: 0x31e8, 0x171: 0x34fe, 0x172: 0x3206, 0x173: 0x351c, 0x174: 0x3229, 0x175: 0x353f, + 0x176: 0x3251, 0x177: 0x356c, 0x178: 0x3265, 0x179: 0x3274, 0x17a: 0x3594, 0x17b: 0x327e, + 0x17c: 0x359e, 0x17d: 0x3283, 0x17e: 0x35a3, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 
0x2f9f, 0x18e: 0x32ab, 0x18f: 0x30ad, 0x190: 0x33b9, 0x191: 0x3157, + 0x192: 0x3468, 0x193: 0x31ed, 0x194: 0x3503, 0x195: 0x39e6, 0x196: 0x3b75, 0x197: 0x39df, + 0x198: 0x3b6e, 0x199: 0x39ed, 0x19a: 0x3b7c, 0x19b: 0x39d8, 0x19c: 0x3b67, + 0x19e: 0x38c7, 0x19f: 0x3a56, 0x1a0: 0x38c0, 0x1a1: 0x3a4f, 0x1a2: 0x35ca, 0x1a3: 0x35dc, + 0x1a6: 0x3058, 0x1a7: 0x3364, 0x1a8: 0x30d5, 0x1a9: 0x33e6, + 0x1aa: 0x4703, 0x1ab: 0x4794, 0x1ac: 0x39a7, 0x1ad: 0x3b36, 0x1ae: 0x35ee, 0x1af: 0x35f4, + 0x1b0: 0x33dc, 0x1b4: 0x303f, 0x1b5: 0x334b, + 0x1b8: 0x3111, 0x1b9: 0x3422, 0x1ba: 0x38ce, 0x1bb: 0x3a5d, + 0x1bc: 0x35c4, 0x1bd: 0x35d6, 0x1be: 0x35d0, 0x1bf: 0x35e2, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2fa4, 0x1c1: 0x32b0, 0x1c2: 0x2fa9, 0x1c3: 0x32b5, 0x1c4: 0x3021, 0x1c5: 0x332d, + 0x1c6: 0x3026, 0x1c7: 0x3332, 0x1c8: 0x30b2, 0x1c9: 0x33be, 0x1ca: 0x30b7, 0x1cb: 0x33c3, + 0x1cc: 0x315c, 0x1cd: 0x346d, 0x1ce: 0x3161, 0x1cf: 0x3472, 0x1d0: 0x317f, 0x1d1: 0x3490, + 0x1d2: 0x3184, 0x1d3: 0x3495, 0x1d4: 0x31f2, 0x1d5: 0x3508, 0x1d6: 0x31f7, 0x1d7: 0x350d, + 0x1d8: 0x319d, 0x1d9: 0x34ae, 0x1da: 0x31b6, 0x1db: 0x34cc, + 0x1de: 0x3071, 0x1df: 0x337d, + 0x1e6: 0x46a9, 0x1e7: 0x473a, 0x1e8: 0x46d1, 0x1e9: 0x4762, + 0x1ea: 0x3976, 0x1eb: 0x3b05, 0x1ec: 0x3953, 0x1ed: 0x3ae2, 0x1ee: 0x46ef, 0x1ef: 0x4780, + 0x1f0: 0x396f, 0x1f1: 0x3afe, 0x1f2: 0x325b, 0x1f3: 0x3576, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x49c5, 0x241: 0x49ca, 0x242: 0x9933, 0x243: 0x49cf, 0x244: 0x4a88, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x0173, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35b8, + 0x286: 0x3600, 0x287: 0x00ce, 0x288: 0x361e, 0x289: 0x362a, 0x28a: 0x363c, + 0x28c: 0x365a, 0x28e: 0x366c, 0x28f: 0x368a, 0x290: 0x3e1f, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x364e, 0x2ab: 0x367e, 0x2ac: 0x4815, 0x2ad: 
0x36ae, 0x2ae: 0x483f, 0x2af: 0x36c0, + 0x2b0: 0x3e87, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3738, 0x2c1: 0x3744, 0x2c3: 0x3732, + 0x2c6: 0xa000, 0x2c7: 0x3720, + 0x2cc: 0x3774, 0x2cd: 0x375c, 0x2ce: 0x3786, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3768, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37ec, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x374a, 0x302: 0x37ce, + 0x310: 0x3726, 0x311: 0x37aa, + 0x312: 0x372c, 0x313: 0x37b0, 0x316: 0x373e, 0x317: 0x37c2, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3840, 0x31b: 0x3846, 0x31c: 0x3750, 0x31d: 0x37d4, + 0x31e: 0x3756, 0x31f: 0x37da, 0x322: 0x3762, 0x323: 0x37e6, + 0x324: 0x376e, 0x325: 0x37f2, 0x326: 0x377a, 0x327: 0x37fe, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x384c, 0x32b: 0x3852, 0x32c: 0x37a4, 0x32d: 0x3828, 0x32e: 0x3780, 0x32f: 0x3804, + 0x330: 0x378c, 0x331: 0x3810, 0x332: 0x3792, 0x333: 0x3816, 0x334: 0x3798, 0x335: 0x381c, + 0x338: 0x379e, 0x339: 0x3822, + // Block 0xd, offset 0x340 + 0x351: 0x812e, + 0x352: 0x8133, 0x353: 0x8133, 0x354: 0x8133, 0x355: 0x8133, 0x356: 0x812e, 0x357: 0x8133, + 0x358: 0x8133, 0x359: 0x8133, 0x35a: 0x812f, 0x35b: 0x812e, 0x35c: 0x8133, 0x35d: 0x8133, + 0x35e: 0x8133, 0x35f: 0x8133, 0x360: 0x8133, 0x361: 0x8133, 0x362: 0x812e, 0x363: 0x812e, + 0x364: 0x812e, 0x365: 0x812e, 0x366: 0x812e, 0x367: 0x812e, 0x368: 0x8133, 0x369: 0x8133, + 0x36a: 0x812e, 0x36b: 0x8133, 0x36c: 0x8133, 0x36d: 0x812f, 0x36e: 0x8132, 0x36f: 0x8133, + 0x370: 0x8106, 0x371: 0x8107, 0x372: 0x8108, 0x373: 0x8109, 0x374: 0x810a, 0x375: 0x810b, + 0x376: 0x810c, 0x377: 0x810d, 0x378: 0x810e, 0x379: 0x810f, 0x37a: 0x810f, 0x37b: 0x8110, + 0x37c: 0x8111, 0x37d: 0x8112, 0x37f: 0x8113, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8117, + 0x38c: 0x8118, 0x38d: 0x8119, 0x38e: 0x811a, 0x38f: 0x811b, 0x390: 0x811c, 0x391: 0x811d, + 0x392: 0x811e, 0x393: 0x9933, 0x394: 0x9933, 0x395: 0x992e, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x8133, 0x39b: 0x8133, 0x39c: 0x812e, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x812e, + 0x3b0: 0x811f, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812e, 0x3d4: 0x8133, 0x3d5: 0x8133, 0x3d6: 0x8133, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x8133, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x8133, 0x3e0: 0x8133, 0x3e1: 0x8133, 0x3e3: 0x812e, + 0x3e4: 0x8133, 0x3e5: 0x8133, 0x3e6: 0x812e, 0x3e7: 0x8133, 0x3e8: 0x8133, 0x3e9: 0x812e, + 0x3ea: 0x8133, 0x3eb: 0x8133, 0x3ec: 0x8133, 0x3ed: 0x812e, 0x3ee: 0x812e, 0x3ef: 0x812e, + 0x3f0: 0x8117, 0x3f1: 0x8118, 0x3f2: 0x8119, 0x3f3: 0x8133, 0x3f4: 0x8133, 0x3f5: 0x8133, + 0x3f6: 0x812e, 0x3f7: 0x8133, 0x3f8: 0x8133, 0x3f9: 0x812e, 0x3fa: 0x812e, 0x3fb: 0x8133, + 0x3fc: 0x8133, 0x3fd: 0x8133, 0x3fe: 0x8133, 0x3ff: 0x8133, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d33, 0x407: 0xa000, 0x408: 0x2d3b, 0x409: 0xa000, 0x40a: 0x2d43, 0x40b: 0xa000, + 0x40c: 0x2d4b, 0x40d: 0xa000, 0x40e: 0x2d53, 0x411: 0xa000, + 0x412: 0x2d5b, + 0x434: 0x8103, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d63, + 0x43c: 0xa000, 0x43d: 0x2d6b, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8133, 0x441: 0x8133, 0x442: 0x812e, 0x443: 0x8133, 0x444: 0x8133, 0x445: 
0x8133, + 0x446: 0x8133, 0x447: 0x8133, 0x448: 0x8133, 0x449: 0x8133, 0x44a: 0x812e, 0x44b: 0x8133, + 0x44c: 0x8133, 0x44d: 0x8136, 0x44e: 0x812b, 0x44f: 0x812e, 0x450: 0x812a, 0x451: 0x8133, + 0x452: 0x8133, 0x453: 0x8133, 0x454: 0x8133, 0x455: 0x8133, 0x456: 0x8133, 0x457: 0x8133, + 0x458: 0x8133, 0x459: 0x8133, 0x45a: 0x8133, 0x45b: 0x8133, 0x45c: 0x8133, 0x45d: 0x8133, + 0x45e: 0x8133, 0x45f: 0x8133, 0x460: 0x8133, 0x461: 0x8133, 0x462: 0x8133, 0x463: 0x8133, + 0x464: 0x8133, 0x465: 0x8133, 0x466: 0x8133, 0x467: 0x8133, 0x468: 0x8133, 0x469: 0x8133, + 0x46a: 0x8133, 0x46b: 0x8133, 0x46c: 0x8133, 0x46d: 0x8133, 0x46e: 0x8133, 0x46f: 0x8133, + 0x470: 0x8133, 0x471: 0x8133, 0x472: 0x8133, 0x473: 0x8133, 0x474: 0x8133, 0x475: 0x8133, + 0x476: 0x8134, 0x477: 0x8132, 0x478: 0x8132, 0x479: 0x812e, 0x47b: 0x8133, + 0x47c: 0x8135, 0x47d: 0x812e, 0x47e: 0x8133, 0x47f: 0x812e, + // Block 0x12, offset 0x480 + 0x480: 0x2fae, 0x481: 0x32ba, 0x482: 0x2fb8, 0x483: 0x32c4, 0x484: 0x2fbd, 0x485: 0x32c9, + 0x486: 0x2fc2, 0x487: 0x32ce, 0x488: 0x38e3, 0x489: 0x3a72, 0x48a: 0x2fdb, 0x48b: 0x32e7, + 0x48c: 0x2fe5, 0x48d: 0x32f1, 0x48e: 0x2ff4, 0x48f: 0x3300, 0x490: 0x2fea, 0x491: 0x32f6, + 0x492: 0x2fef, 0x493: 0x32fb, 0x494: 0x3906, 0x495: 0x3a95, 0x496: 0x390d, 0x497: 0x3a9c, + 0x498: 0x3030, 0x499: 0x333c, 0x49a: 0x3035, 0x49b: 0x3341, 0x49c: 0x391b, 0x49d: 0x3aaa, + 0x49e: 0x303a, 0x49f: 0x3346, 0x4a0: 0x3049, 0x4a1: 0x3355, 0x4a2: 0x3067, 0x4a3: 0x3373, + 0x4a4: 0x3076, 0x4a5: 0x3382, 0x4a6: 0x306c, 0x4a7: 0x3378, 0x4a8: 0x307b, 0x4a9: 0x3387, + 0x4aa: 0x3080, 0x4ab: 0x338c, 0x4ac: 0x30c6, 0x4ad: 0x33d2, 0x4ae: 0x3922, 0x4af: 0x3ab1, + 0x4b0: 0x30d0, 0x4b1: 0x33e1, 0x4b2: 0x30da, 0x4b3: 0x33eb, 0x4b4: 0x30e4, 0x4b5: 0x33f5, + 0x4b6: 0x46db, 0x4b7: 0x476c, 0x4b8: 0x3929, 0x4b9: 0x3ab8, 0x4ba: 0x30fd, 0x4bb: 0x340e, + 0x4bc: 0x30f8, 0x4bd: 0x3409, 0x4be: 0x3102, 0x4bf: 0x3413, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3107, 0x4c1: 0x3418, 0x4c2: 0x310c, 0x4c3: 0x341d, 0x4c4: 0x3120, 0x4c5: 0x3431, + 0x4c6: 0x312a, 0x4c7: 0x343b, 0x4c8: 0x3139, 0x4c9: 0x344a, 0x4ca: 0x3134, 0x4cb: 0x3445, + 0x4cc: 0x394c, 0x4cd: 0x3adb, 0x4ce: 0x395a, 0x4cf: 0x3ae9, 0x4d0: 0x3961, 0x4d1: 0x3af0, + 0x4d2: 0x3968, 0x4d3: 0x3af7, 0x4d4: 0x3166, 0x4d5: 0x3477, 0x4d6: 0x316b, 0x4d7: 0x347c, + 0x4d8: 0x3175, 0x4d9: 0x3486, 0x4da: 0x4708, 0x4db: 0x4799, 0x4dc: 0x39ae, 0x4dd: 0x3b3d, + 0x4de: 0x318e, 0x4df: 0x349f, 0x4e0: 0x3198, 0x4e1: 0x34a9, 0x4e2: 0x4717, 0x4e3: 0x47a8, + 0x4e4: 0x39b5, 0x4e5: 0x3b44, 0x4e6: 0x39bc, 0x4e7: 0x3b4b, 0x4e8: 0x39c3, 0x4e9: 0x3b52, + 0x4ea: 0x31a7, 0x4eb: 0x34b8, 0x4ec: 0x31b1, 0x4ed: 0x34c7, 0x4ee: 0x31c5, 0x4ef: 0x34db, + 0x4f0: 0x31c0, 0x4f1: 0x34d6, 0x4f2: 0x3201, 0x4f3: 0x3517, 0x4f4: 0x3210, 0x4f5: 0x3526, + 0x4f6: 0x320b, 0x4f7: 0x3521, 0x4f8: 0x39ca, 0x4f9: 0x3b59, 0x4fa: 0x39d1, 0x4fb: 0x3b60, + 0x4fc: 0x3215, 0x4fd: 0x352b, 0x4fe: 0x321a, 0x4ff: 0x3530, + // Block 0x14, offset 0x500 + 0x500: 0x321f, 0x501: 0x3535, 0x502: 0x3224, 0x503: 0x353a, 0x504: 0x3233, 0x505: 0x3549, + 0x506: 0x322e, 0x507: 0x3544, 0x508: 0x3238, 0x509: 0x3553, 0x50a: 0x323d, 0x50b: 0x3558, + 0x50c: 0x3242, 0x50d: 0x355d, 0x50e: 0x3260, 0x50f: 0x357b, 0x510: 0x3279, 0x511: 0x3599, + 0x512: 0x3288, 0x513: 0x35a8, 0x514: 0x328d, 0x515: 0x35ad, 0x516: 0x3391, 0x517: 0x34bd, + 0x518: 0x354e, 0x519: 0x358a, 0x51b: 0x35e8, + 0x520: 0x46b8, 0x521: 0x4749, 0x522: 0x2f9a, 0x523: 0x32a6, + 0x524: 0x388f, 0x525: 0x3a1e, 0x526: 0x3888, 0x527: 0x3a17, 0x528: 0x389d, 0x529: 0x3a2c, + 0x52a: 0x3896, 0x52b: 0x3a25, 0x52c: 0x38d5, 
0x52d: 0x3a64, 0x52e: 0x38ab, 0x52f: 0x3a3a, + 0x530: 0x38a4, 0x531: 0x3a33, 0x532: 0x38b9, 0x533: 0x3a48, 0x534: 0x38b2, 0x535: 0x3a41, + 0x536: 0x38dc, 0x537: 0x3a6b, 0x538: 0x46cc, 0x539: 0x475d, 0x53a: 0x3017, 0x53b: 0x3323, + 0x53c: 0x3003, 0x53d: 0x330f, 0x53e: 0x38f1, 0x53f: 0x3a80, + // Block 0x15, offset 0x540 + 0x540: 0x38ea, 0x541: 0x3a79, 0x542: 0x38ff, 0x543: 0x3a8e, 0x544: 0x38f8, 0x545: 0x3a87, + 0x546: 0x3914, 0x547: 0x3aa3, 0x548: 0x30a8, 0x549: 0x33b4, 0x54a: 0x30bc, 0x54b: 0x33c8, + 0x54c: 0x46fe, 0x54d: 0x478f, 0x54e: 0x314d, 0x54f: 0x345e, 0x550: 0x3937, 0x551: 0x3ac6, + 0x552: 0x3930, 0x553: 0x3abf, 0x554: 0x3945, 0x555: 0x3ad4, 0x556: 0x393e, 0x557: 0x3acd, + 0x558: 0x39a0, 0x559: 0x3b2f, 0x55a: 0x3984, 0x55b: 0x3b13, 0x55c: 0x397d, 0x55d: 0x3b0c, + 0x55e: 0x3992, 0x55f: 0x3b21, 0x560: 0x398b, 0x561: 0x3b1a, 0x562: 0x3999, 0x563: 0x3b28, + 0x564: 0x31fc, 0x565: 0x3512, 0x566: 0x31de, 0x567: 0x34f4, 0x568: 0x39fb, 0x569: 0x3b8a, + 0x56a: 0x39f4, 0x56b: 0x3b83, 0x56c: 0x3a09, 0x56d: 0x3b98, 0x56e: 0x3a02, 0x56f: 0x3b91, + 0x570: 0x3a10, 0x571: 0x3b9f, 0x572: 0x3247, 0x573: 0x3562, 0x574: 0x326f, 0x575: 0x358f, + 0x576: 0x326a, 0x577: 0x3585, 0x578: 0x3256, 0x579: 0x3571, + // Block 0x16, offset 0x580 + 0x580: 0x481b, 0x581: 0x4821, 0x582: 0x4935, 0x583: 0x494d, 0x584: 0x493d, 0x585: 0x4955, + 0x586: 0x4945, 0x587: 0x495d, 0x588: 0x47c1, 0x589: 0x47c7, 0x58a: 0x48a5, 0x58b: 0x48bd, + 0x58c: 0x48ad, 0x58d: 0x48c5, 0x58e: 0x48b5, 0x58f: 0x48cd, 0x590: 0x482d, 0x591: 0x4833, + 0x592: 0x3dcf, 0x593: 0x3ddf, 0x594: 0x3dd7, 0x595: 0x3de7, + 0x598: 0x47cd, 0x599: 0x47d3, 0x59a: 0x3cff, 0x59b: 0x3d0f, 0x59c: 0x3d07, 0x59d: 0x3d17, + 0x5a0: 0x4845, 0x5a1: 0x484b, 0x5a2: 0x4965, 0x5a3: 0x497d, + 0x5a4: 0x496d, 0x5a5: 0x4985, 0x5a6: 0x4975, 0x5a7: 0x498d, 0x5a8: 0x47d9, 0x5a9: 0x47df, + 0x5aa: 0x48d5, 0x5ab: 0x48ed, 0x5ac: 0x48dd, 0x5ad: 0x48f5, 0x5ae: 0x48e5, 0x5af: 0x48fd, + 0x5b0: 0x485d, 0x5b1: 0x4863, 0x5b2: 0x3e2f, 0x5b3: 0x3e47, 0x5b4: 0x3e37, 0x5b5: 0x3e4f, + 0x5b6: 0x3e3f, 0x5b7: 0x3e57, 0x5b8: 0x47e5, 0x5b9: 0x47eb, 0x5ba: 0x3d2f, 0x5bb: 0x3d47, + 0x5bc: 0x3d37, 0x5bd: 0x3d4f, 0x5be: 0x3d3f, 0x5bf: 0x3d57, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4869, 0x5c1: 0x486f, 0x5c2: 0x3e5f, 0x5c3: 0x3e6f, 0x5c4: 0x3e67, 0x5c5: 0x3e77, + 0x5c8: 0x47f1, 0x5c9: 0x47f7, 0x5ca: 0x3d5f, 0x5cb: 0x3d6f, + 0x5cc: 0x3d67, 0x5cd: 0x3d77, 0x5d0: 0x487b, 0x5d1: 0x4881, + 0x5d2: 0x3e97, 0x5d3: 0x3eaf, 0x5d4: 0x3e9f, 0x5d5: 0x3eb7, 0x5d6: 0x3ea7, 0x5d7: 0x3ebf, + 0x5d9: 0x47fd, 0x5db: 0x3d7f, 0x5dd: 0x3d87, + 0x5df: 0x3d8f, 0x5e0: 0x4893, 0x5e1: 0x4899, 0x5e2: 0x4995, 0x5e3: 0x49ad, + 0x5e4: 0x499d, 0x5e5: 0x49b5, 0x5e6: 0x49a5, 0x5e7: 0x49bd, 0x5e8: 0x4803, 0x5e9: 0x4809, + 0x5ea: 0x4905, 0x5eb: 0x491d, 0x5ec: 0x490d, 0x5ed: 0x4925, 0x5ee: 0x4915, 0x5ef: 0x492d, + 0x5f0: 0x480f, 0x5f1: 0x4335, 0x5f2: 0x36a8, 0x5f3: 0x433b, 0x5f4: 0x4839, 0x5f5: 0x4341, + 0x5f6: 0x36ba, 0x5f7: 0x4347, 0x5f8: 0x36d8, 0x5f9: 0x434d, 0x5fa: 0x36f0, 0x5fb: 0x4353, + 0x5fc: 0x4887, 0x5fd: 0x4359, + // Block 0x18, offset 0x600 + 0x600: 0x3db7, 0x601: 0x3dbf, 0x602: 0x419b, 0x603: 0x41b9, 0x604: 0x41a5, 0x605: 0x41c3, + 0x606: 0x41af, 0x607: 0x41cd, 0x608: 0x3cef, 0x609: 0x3cf7, 0x60a: 0x40e7, 0x60b: 0x4105, + 0x60c: 0x40f1, 0x60d: 0x410f, 0x60e: 0x40fb, 0x60f: 0x4119, 0x610: 0x3dff, 0x611: 0x3e07, + 0x612: 0x41d7, 0x613: 0x41f5, 0x614: 0x41e1, 0x615: 0x41ff, 0x616: 0x41eb, 0x617: 0x4209, + 0x618: 0x3d1f, 0x619: 0x3d27, 0x61a: 0x4123, 0x61b: 0x4141, 0x61c: 0x412d, 0x61d: 0x414b, + 0x61e: 0x4137, 0x61f: 0x4155, 0x620: 
0x3ed7, 0x621: 0x3edf, 0x622: 0x4213, 0x623: 0x4231, + 0x624: 0x421d, 0x625: 0x423b, 0x626: 0x4227, 0x627: 0x4245, 0x628: 0x3d97, 0x629: 0x3d9f, + 0x62a: 0x415f, 0x62b: 0x417d, 0x62c: 0x4169, 0x62d: 0x4187, 0x62e: 0x4173, 0x62f: 0x4191, + 0x630: 0x369c, 0x631: 0x3696, 0x632: 0x3da7, 0x633: 0x36a2, 0x634: 0x3daf, + 0x636: 0x4827, 0x637: 0x3dc7, 0x638: 0x360c, 0x639: 0x3606, 0x63a: 0x35fa, 0x63b: 0x4305, + 0x63c: 0x3612, 0x63d: 0x8100, 0x63e: 0x01d6, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35be, 0x642: 0x3def, 0x643: 0x36b4, 0x644: 0x3df7, + 0x646: 0x4851, 0x647: 0x3e0f, 0x648: 0x3618, 0x649: 0x430b, 0x64a: 0x3624, 0x64b: 0x4311, + 0x64c: 0x3630, 0x64d: 0x3ba6, 0x64e: 0x3bad, 0x64f: 0x3bb4, 0x650: 0x36cc, 0x651: 0x36c6, + 0x652: 0x3e17, 0x653: 0x44fb, 0x656: 0x36d2, 0x657: 0x3e27, + 0x658: 0x3648, 0x659: 0x3642, 0x65a: 0x3636, 0x65b: 0x4317, 0x65d: 0x3bbb, + 0x65e: 0x3bc2, 0x65f: 0x3bc9, 0x660: 0x3702, 0x661: 0x36fc, 0x662: 0x3e7f, 0x663: 0x4503, + 0x664: 0x36e4, 0x665: 0x36ea, 0x666: 0x3708, 0x667: 0x3e8f, 0x668: 0x3678, 0x669: 0x3672, + 0x66a: 0x3666, 0x66b: 0x4323, 0x66c: 0x3660, 0x66d: 0x35b2, 0x66e: 0x42ff, 0x66f: 0x0081, + 0x672: 0x3ec7, 0x673: 0x370e, 0x674: 0x3ecf, + 0x676: 0x489f, 0x677: 0x3ee7, 0x678: 0x3654, 0x679: 0x431d, 0x67a: 0x3684, 0x67b: 0x432f, + 0x67c: 0x3690, 0x67d: 0x426d, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c1d, 0x683: 0xa000, 0x684: 0x3c24, 0x685: 0xa000, + 0x687: 0x3c2b, 0x688: 0xa000, 0x689: 0x3c32, + 0x68d: 0xa000, + 0x6a0: 0x2f7c, 0x6a1: 0xa000, 0x6a2: 0x3c40, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c39, 0x6ae: 0x2f77, 0x6af: 0x2f81, + 0x6b0: 0x3c47, 0x6b1: 0x3c4e, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c55, 0x6b5: 0x3c5c, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c63, 0x6b9: 0x3c6a, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c71, 0x6c1: 0x3c78, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c8d, 0x6c5: 0x3c94, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c9b, 0x6c9: 0x3ca2, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3cb7, 0x6ed: 0x3cbe, 0x6ee: 0x3cc5, 0x6ef: 0x3ccc, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f1f, 0x70d: 0xa000, 0x70e: 0x3f27, 0x70f: 0xa000, 0x710: 0x3f2f, 0x711: 0xa000, + 0x712: 0x3f37, 0x713: 0xa000, 0x714: 0x3f3f, 0x715: 0xa000, 0x716: 0x3f47, 0x717: 0xa000, + 0x718: 0x3f4f, 0x719: 0xa000, 0x71a: 0x3f57, 0x71b: 0xa000, 0x71c: 0x3f5f, 0x71d: 0xa000, + 0x71e: 0x3f67, 0x71f: 0xa000, 0x720: 0x3f6f, 0x721: 0xa000, 0x722: 0x3f77, + 0x724: 0xa000, 0x725: 0x3f7f, 0x726: 0xa000, 0x727: 0x3f87, 0x728: 0xa000, 0x729: 0x3f8f, + 0x72f: 0xa000, + 0x730: 0x3f97, 0x731: 0x3f9f, 0x732: 0xa000, 0x733: 0x3fa7, 0x734: 0x3faf, 0x735: 0xa000, + 0x736: 0x3fb7, 0x737: 0x3fbf, 0x738: 0xa000, 0x739: 0x3fc7, 0x73a: 0x3fcf, 0x73b: 0xa000, + 0x73c: 0x3fd7, 0x73d: 0x3fdf, + // Block 0x1d, offset 0x740 + 0x754: 0x3f17, + 0x759: 0x9904, 0x75a: 0x9904, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fe7, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3ff7, 0x76d: 0xa000, 0x76e: 0x3fff, 0x76f: 0xa000, + 0x770: 0x4007, 0x771: 0xa000, 0x772: 0x400f, 0x773: 0xa000, 0x774: 0x4017, 0x775: 0xa000, + 0x776: 0x401f, 0x777: 0xa000, 0x778: 0x4027, 0x779: 0xa000, 0x77a: 0x402f, 0x77b: 0xa000, + 0x77c: 0x4037, 0x77d: 0xa000, 0x77e: 0x403f, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4047, 0x781: 0xa000, 0x782: 
0x404f, 0x784: 0xa000, 0x785: 0x4057, + 0x786: 0xa000, 0x787: 0x405f, 0x788: 0xa000, 0x789: 0x4067, + 0x78f: 0xa000, 0x790: 0x406f, 0x791: 0x4077, + 0x792: 0xa000, 0x793: 0x407f, 0x794: 0x4087, 0x795: 0xa000, 0x796: 0x408f, 0x797: 0x4097, + 0x798: 0xa000, 0x799: 0x409f, 0x79a: 0x40a7, 0x79b: 0xa000, 0x79c: 0x40af, 0x79d: 0x40b7, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fef, + 0x7b7: 0x40bf, 0x7b8: 0x40c7, 0x7b9: 0x40cf, 0x7ba: 0x40d7, + 0x7bd: 0xa000, 0x7be: 0x40df, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x137a, 0x7c1: 0x0cfe, 0x7c2: 0x13d6, 0x7c3: 0x13a2, 0x7c4: 0x0e5a, 0x7c5: 0x06ee, + 0x7c6: 0x08e2, 0x7c7: 0x162e, 0x7c8: 0x162e, 0x7c9: 0x0a0e, 0x7ca: 0x1462, 0x7cb: 0x0946, + 0x7cc: 0x0a0a, 0x7cd: 0x0bf2, 0x7ce: 0x0fd2, 0x7cf: 0x1162, 0x7d0: 0x129a, 0x7d1: 0x12d6, + 0x7d2: 0x130a, 0x7d3: 0x141e, 0x7d4: 0x0d76, 0x7d5: 0x0e02, 0x7d6: 0x0eae, 0x7d7: 0x0f46, + 0x7d8: 0x1262, 0x7d9: 0x144a, 0x7da: 0x1576, 0x7db: 0x0712, 0x7dc: 0x08b6, 0x7dd: 0x0d8a, + 0x7de: 0x0ed2, 0x7df: 0x1296, 0x7e0: 0x15c6, 0x7e1: 0x0ab6, 0x7e2: 0x0e7a, 0x7e3: 0x1286, + 0x7e4: 0x131a, 0x7e5: 0x0c26, 0x7e6: 0x11be, 0x7e7: 0x12e2, 0x7e8: 0x0b22, 0x7e9: 0x0d12, + 0x7ea: 0x0e1a, 0x7eb: 0x0f1e, 0x7ec: 0x142a, 0x7ed: 0x0752, 0x7ee: 0x07ea, 0x7ef: 0x0856, + 0x7f0: 0x0c8e, 0x7f1: 0x0d82, 0x7f2: 0x0ece, 0x7f3: 0x0ff2, 0x7f4: 0x117a, 0x7f5: 0x128e, + 0x7f6: 0x12a6, 0x7f7: 0x13ca, 0x7f8: 0x14f2, 0x7f9: 0x15a6, 0x7fa: 0x15c2, 0x7fb: 0x102e, + 0x7fc: 0x106e, 0x7fd: 0x1126, 0x7fe: 0x1246, 0x7ff: 0x147e, + // Block 0x20, offset 0x800 + 0x800: 0x15ce, 0x801: 0x134e, 0x802: 0x09ca, 0x803: 0x0b3e, 0x804: 0x10de, 0x805: 0x119e, + 0x806: 0x0f02, 0x807: 0x1036, 0x808: 0x139a, 0x809: 0x14ea, 0x80a: 0x09c6, 0x80b: 0x0a92, + 0x80c: 0x0d7a, 0x80d: 0x0e2e, 0x80e: 0x0e62, 0x80f: 0x1116, 0x810: 0x113e, 0x811: 0x14aa, + 0x812: 0x0852, 0x813: 0x11aa, 0x814: 0x07f6, 0x815: 0x07f2, 0x816: 0x109a, 0x817: 0x112a, + 0x818: 0x125e, 0x819: 0x14b2, 0x81a: 0x136a, 0x81b: 0x0c2a, 0x81c: 0x0d76, 0x81d: 0x135a, + 0x81e: 0x06fa, 0x81f: 0x0a66, 0x820: 0x0b96, 0x821: 0x0f32, 0x822: 0x0fb2, 0x823: 0x0876, + 0x824: 0x103e, 0x825: 0x0762, 0x826: 0x0b7a, 0x827: 0x06da, 0x828: 0x0dee, 0x829: 0x0ca6, + 0x82a: 0x1112, 0x82b: 0x08ca, 0x82c: 0x09b6, 0x82d: 0x0ffe, 0x82e: 0x1266, 0x82f: 0x133e, + 0x830: 0x0dba, 0x831: 0x13fa, 0x832: 0x0de6, 0x833: 0x0c3a, 0x834: 0x121e, 0x835: 0x0c5a, + 0x836: 0x0fae, 0x837: 0x072e, 0x838: 0x07aa, 0x839: 0x07ee, 0x83a: 0x0d56, 0x83b: 0x10fe, + 0x83c: 0x11f6, 0x83d: 0x134a, 0x83e: 0x145e, 0x83f: 0x085e, + // Block 0x21, offset 0x840 + 0x840: 0x0912, 0x841: 0x0a1a, 0x842: 0x0b32, 0x843: 0x0cc2, 0x844: 0x0e7e, 0x845: 0x1042, + 0x846: 0x149a, 0x847: 0x157e, 0x848: 0x15d2, 0x849: 0x15ea, 0x84a: 0x083a, 0x84b: 0x0cf6, + 0x84c: 0x0da6, 0x84d: 0x13ee, 0x84e: 0x0afe, 0x84f: 0x0bda, 0x850: 0x0bf6, 0x851: 0x0c86, + 0x852: 0x0e6e, 0x853: 0x0eba, 0x854: 0x0f6a, 0x855: 0x108e, 0x856: 0x1132, 0x857: 0x1196, + 0x858: 0x13de, 0x859: 0x126e, 0x85a: 0x1406, 0x85b: 0x1482, 0x85c: 0x0812, 0x85d: 0x083e, + 0x85e: 0x0926, 0x85f: 0x0eaa, 0x860: 0x12f6, 0x861: 0x133e, 0x862: 0x0b1e, 0x863: 0x0b8e, + 0x864: 0x0c52, 0x865: 0x0db2, 0x866: 0x10da, 0x867: 0x0f26, 0x868: 0x073e, 0x869: 0x0982, + 0x86a: 0x0a66, 0x86b: 0x0aca, 0x86c: 0x0b9a, 0x86d: 0x0f42, 0x86e: 0x0f5e, 0x86f: 0x116e, + 0x870: 0x118e, 0x871: 0x1466, 0x872: 0x14e6, 0x873: 0x14f6, 0x874: 0x1532, 0x875: 0x0756, + 0x876: 0x1082, 0x877: 0x1452, 0x878: 0x14ce, 0x879: 0x0bb2, 0x87a: 0x071a, 0x87b: 0x077a, + 0x87c: 0x0a6a, 0x87d: 0x0a8a, 0x87e: 0x0cb2, 0x87f: 0x0d76, + // Block 
0x22, offset 0x880 + 0x880: 0x0ec6, 0x881: 0x0fce, 0x882: 0x127a, 0x883: 0x141a, 0x884: 0x1626, 0x885: 0x0ce6, + 0x886: 0x14a6, 0x887: 0x0836, 0x888: 0x0d32, 0x889: 0x0d3e, 0x88a: 0x0e12, 0x88b: 0x0e4a, + 0x88c: 0x0f4e, 0x88d: 0x0faa, 0x88e: 0x102a, 0x88f: 0x110e, 0x890: 0x153e, 0x891: 0x07b2, + 0x892: 0x0c06, 0x893: 0x14b6, 0x894: 0x076a, 0x895: 0x0aae, 0x896: 0x0e32, 0x897: 0x13e2, + 0x898: 0x0b6a, 0x899: 0x0bba, 0x89a: 0x0d46, 0x89b: 0x0f32, 0x89c: 0x14be, 0x89d: 0x081a, + 0x89e: 0x0902, 0x89f: 0x0a9a, 0x8a0: 0x0cd6, 0x8a1: 0x0d22, 0x8a2: 0x0d62, 0x8a3: 0x0df6, + 0x8a4: 0x0f4a, 0x8a5: 0x0fbe, 0x8a6: 0x115a, 0x8a7: 0x12fa, 0x8a8: 0x1306, 0x8a9: 0x145a, + 0x8aa: 0x14da, 0x8ab: 0x0886, 0x8ac: 0x0e4e, 0x8ad: 0x0906, 0x8ae: 0x0eca, 0x8af: 0x0f6e, + 0x8b0: 0x128a, 0x8b1: 0x14c2, 0x8b2: 0x15ae, 0x8b3: 0x15d6, 0x8b4: 0x0d3a, 0x8b5: 0x0e2a, + 0x8b6: 0x11c6, 0x8b7: 0x10ba, 0x8b8: 0x10c6, 0x8b9: 0x10ea, 0x8ba: 0x0f1a, 0x8bb: 0x0ea2, + 0x8bc: 0x1366, 0x8bd: 0x0736, 0x8be: 0x122e, 0x8bf: 0x081e, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080e, 0x8c1: 0x0b0e, 0x8c2: 0x0c2e, 0x8c3: 0x10f6, 0x8c4: 0x0a56, 0x8c5: 0x0e06, + 0x8c6: 0x0cf2, 0x8c7: 0x13ea, 0x8c8: 0x12ea, 0x8c9: 0x14ae, 0x8ca: 0x1326, 0x8cb: 0x0b2a, + 0x8cc: 0x078a, 0x8cd: 0x095e, 0x8d0: 0x09b2, + 0x8d2: 0x0ce2, 0x8d5: 0x07fa, 0x8d6: 0x0f22, 0x8d7: 0x0fe6, + 0x8d8: 0x104a, 0x8d9: 0x1066, 0x8da: 0x106a, 0x8db: 0x107e, 0x8dc: 0x14fe, 0x8dd: 0x10ee, + 0x8de: 0x1172, 0x8e0: 0x1292, 0x8e2: 0x1356, + 0x8e5: 0x140a, 0x8e6: 0x1436, + 0x8ea: 0x1552, 0x8eb: 0x1556, 0x8ec: 0x155a, 0x8ed: 0x15be, 0x8ee: 0x142e, 0x8ef: 0x14ca, + 0x8f0: 0x075a, 0x8f1: 0x077e, 0x8f2: 0x0792, 0x8f3: 0x084e, 0x8f4: 0x085a, 0x8f5: 0x089a, + 0x8f6: 0x094e, 0x8f7: 0x096a, 0x8f8: 0x0972, 0x8f9: 0x09ae, 0x8fa: 0x09ba, 0x8fb: 0x0a96, + 0x8fc: 0x0a9e, 0x8fd: 0x0ba6, 0x8fe: 0x0bce, 0x8ff: 0x0bd6, + // Block 0x24, offset 0x900 + 0x900: 0x0bee, 0x901: 0x0c9a, 0x902: 0x0cca, 0x903: 0x0cea, 0x904: 0x0d5a, 0x905: 0x0e1e, + 0x906: 0x0e3a, 0x907: 0x0e6a, 0x908: 0x0ebe, 0x909: 0x0ede, 0x90a: 0x0f52, 0x90b: 0x1032, + 0x90c: 0x104e, 0x90d: 0x1056, 0x90e: 0x1052, 0x90f: 0x105a, 0x910: 0x105e, 0x911: 0x1062, + 0x912: 0x1076, 0x913: 0x107a, 0x914: 0x109e, 0x915: 0x10b2, 0x916: 0x10ce, 0x917: 0x1132, + 0x918: 0x113a, 0x919: 0x1142, 0x91a: 0x1156, 0x91b: 0x117e, 0x91c: 0x11ce, 0x91d: 0x1202, + 0x91e: 0x1202, 0x91f: 0x126a, 0x920: 0x1312, 0x921: 0x132a, 0x922: 0x135e, 0x923: 0x1362, + 0x924: 0x13a6, 0x925: 0x13aa, 0x926: 0x1402, 0x927: 0x140a, 0x928: 0x14de, 0x929: 0x1522, + 0x92a: 0x153a, 0x92b: 0x0b9e, 0x92c: 0x1721, 0x92d: 0x11e6, + 0x930: 0x06e2, 0x931: 0x07e6, 0x932: 0x07a6, 0x933: 0x074e, 0x934: 0x078e, 0x935: 0x07ba, + 0x936: 0x084a, 0x937: 0x0866, 0x938: 0x094e, 0x939: 0x093a, 0x93a: 0x094a, 0x93b: 0x0966, + 0x93c: 0x09b2, 0x93d: 0x09c2, 0x93e: 0x0a06, 0x93f: 0x0a12, + // Block 0x25, offset 0x940 + 0x940: 0x0a2e, 0x941: 0x0a3e, 0x942: 0x0b26, 0x943: 0x0b2e, 0x944: 0x0b5e, 0x945: 0x0b7e, + 0x946: 0x0bae, 0x947: 0x0bc6, 0x948: 0x0bb6, 0x949: 0x0bd6, 0x94a: 0x0bca, 0x94b: 0x0bee, + 0x94c: 0x0c0a, 0x94d: 0x0c62, 0x94e: 0x0c6e, 0x94f: 0x0c76, 0x950: 0x0c9e, 0x951: 0x0ce2, + 0x952: 0x0d12, 0x953: 0x0d16, 0x954: 0x0d2a, 0x955: 0x0daa, 0x956: 0x0dba, 0x957: 0x0e12, + 0x958: 0x0e5e, 0x959: 0x0e56, 0x95a: 0x0e6a, 0x95b: 0x0e86, 0x95c: 0x0ebe, 0x95d: 0x1016, + 0x95e: 0x0ee2, 0x95f: 0x0f16, 0x960: 0x0f22, 0x961: 0x0f62, 0x962: 0x0f7e, 0x963: 0x0fa2, + 0x964: 0x0fc6, 0x965: 0x0fca, 0x966: 0x0fe6, 0x967: 0x0fea, 0x968: 0x0ffa, 0x969: 0x100e, + 0x96a: 0x100a, 0x96b: 0x103a, 0x96c: 0x10b6, 0x96d: 0x10ce, 
0x96e: 0x10e6, 0x96f: 0x111e, + 0x970: 0x1132, 0x971: 0x114e, 0x972: 0x117e, 0x973: 0x1232, 0x974: 0x125a, 0x975: 0x12ce, + 0x976: 0x1316, 0x977: 0x1322, 0x978: 0x132a, 0x979: 0x1342, 0x97a: 0x1356, 0x97b: 0x1346, + 0x97c: 0x135e, 0x97d: 0x135a, 0x97e: 0x1352, 0x97f: 0x1362, + // Block 0x26, offset 0x980 + 0x980: 0x136e, 0x981: 0x13aa, 0x982: 0x13e6, 0x983: 0x1416, 0x984: 0x144e, 0x985: 0x146e, + 0x986: 0x14ba, 0x987: 0x14de, 0x988: 0x14fe, 0x989: 0x1512, 0x98a: 0x1522, 0x98b: 0x152e, + 0x98c: 0x153a, 0x98d: 0x158e, 0x98e: 0x162e, 0x98f: 0x16b8, 0x990: 0x16b3, 0x991: 0x16e5, + 0x992: 0x060a, 0x993: 0x0632, 0x994: 0x0636, 0x995: 0x1767, 0x996: 0x1794, 0x997: 0x180c, + 0x998: 0x161a, 0x999: 0x162a, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fe, 0x9c1: 0x06f6, 0x9c2: 0x0706, 0x9c3: 0x164a, 0x9c4: 0x074a, 0x9c5: 0x075a, + 0x9c6: 0x075e, 0x9c7: 0x0766, 0x9c8: 0x076e, 0x9c9: 0x0772, 0x9ca: 0x077e, 0x9cb: 0x0776, + 0x9cc: 0x05b6, 0x9cd: 0x165e, 0x9ce: 0x0792, 0x9cf: 0x0796, 0x9d0: 0x079a, 0x9d1: 0x07b6, + 0x9d2: 0x164f, 0x9d3: 0x05ba, 0x9d4: 0x07a2, 0x9d5: 0x07c2, 0x9d6: 0x1659, 0x9d7: 0x07d2, + 0x9d8: 0x07da, 0x9d9: 0x073a, 0x9da: 0x07e2, 0x9db: 0x07e6, 0x9dc: 0x1834, 0x9dd: 0x0802, + 0x9de: 0x080a, 0x9df: 0x05c2, 0x9e0: 0x0822, 0x9e1: 0x0826, 0x9e2: 0x082e, 0x9e3: 0x0832, + 0x9e4: 0x05c6, 0x9e5: 0x084a, 0x9e6: 0x084e, 0x9e7: 0x085a, 0x9e8: 0x0866, 0x9e9: 0x086a, + 0x9ea: 0x086e, 0x9eb: 0x0876, 0x9ec: 0x0896, 0x9ed: 0x089a, 0x9ee: 0x08a2, 0x9ef: 0x08b2, + 0x9f0: 0x08ba, 0x9f1: 0x08be, 0x9f2: 0x08be, 0x9f3: 0x08be, 0x9f4: 0x166d, 0x9f5: 0x0e96, + 0x9f6: 0x08d2, 0x9f7: 0x08da, 0x9f8: 0x1672, 0x9f9: 0x08e6, 0x9fa: 0x08ee, 0x9fb: 0x08f6, + 0x9fc: 0x091e, 0x9fd: 0x090a, 0x9fe: 0x0916, 0x9ff: 0x091a, + // Block 0x28, offset 0xa00 + 0xa00: 0x0922, 0xa01: 0x092a, 0xa02: 0x092e, 0xa03: 0x0936, 0xa04: 0x093e, 0xa05: 0x0942, + 0xa06: 0x0942, 0xa07: 0x094a, 0xa08: 0x0952, 0xa09: 0x0956, 0xa0a: 0x0962, 0xa0b: 0x0986, + 0xa0c: 0x096a, 0xa0d: 0x098a, 0xa0e: 0x096e, 0xa0f: 0x0976, 0xa10: 0x080e, 0xa11: 0x09d2, + 0xa12: 0x099a, 0xa13: 0x099e, 0xa14: 0x09a2, 0xa15: 0x0996, 0xa16: 0x09aa, 0xa17: 0x09a6, + 0xa18: 0x09be, 0xa19: 0x1677, 0xa1a: 0x09da, 0xa1b: 0x09de, 0xa1c: 0x09e6, 0xa1d: 0x09f2, + 0xa1e: 0x09fa, 0xa1f: 0x0a16, 0xa20: 0x167c, 0xa21: 0x1681, 0xa22: 0x0a22, 0xa23: 0x0a26, + 0xa24: 0x0a2a, 0xa25: 0x0a1e, 0xa26: 0x0a32, 0xa27: 0x05ca, 0xa28: 0x05ce, 0xa29: 0x0a3a, + 0xa2a: 0x0a42, 0xa2b: 0x0a42, 0xa2c: 0x1686, 0xa2d: 0x0a5e, 0xa2e: 0x0a62, 0xa2f: 0x0a66, + 0xa30: 0x0a6e, 0xa31: 0x168b, 0xa32: 0x0a76, 0xa33: 0x0a7a, 0xa34: 0x0b52, 0xa35: 0x0a82, + 0xa36: 0x05d2, 0xa37: 0x0a8e, 0xa38: 0x0a9e, 0xa39: 0x0aaa, 0xa3a: 0x0aa6, 0xa3b: 0x1695, + 0xa3c: 0x0ab2, 0xa3d: 0x169a, 0xa3e: 0x0abe, 0xa3f: 0x0aba, + // Block 0x29, offset 0xa40 + 0xa40: 0x0ac2, 0xa41: 0x0ad2, 0xa42: 0x0ad6, 0xa43: 0x05d6, 0xa44: 0x0ae6, 0xa45: 0x0aee, + 0xa46: 0x0af2, 0xa47: 0x0af6, 0xa48: 0x05da, 0xa49: 0x169f, 0xa4a: 0x05de, 0xa4b: 0x0b12, + 0xa4c: 0x0b16, 0xa4d: 0x0b1a, 0xa4e: 0x0b22, 0xa4f: 0x1866, 0xa50: 0x0b3a, 0xa51: 0x16a9, + 0xa52: 0x16a9, 0xa53: 0x11da, 0xa54: 0x0b4a, 0xa55: 0x0b4a, 0xa56: 0x05e2, 0xa57: 0x16cc, + 0xa58: 0x179e, 0xa59: 0x0b5a, 0xa5a: 0x0b62, 0xa5b: 0x05e6, 0xa5c: 0x0b76, 0xa5d: 0x0b86, + 0xa5e: 0x0b8a, 0xa5f: 0x0b92, 0xa60: 0x0ba2, 0xa61: 0x05ee, 0xa62: 0x05ea, 0xa63: 0x0ba6, + 0xa64: 0x16ae, 0xa65: 0x0baa, 0xa66: 0x0bbe, 0xa67: 0x0bc2, 0xa68: 0x0bc6, 0xa69: 0x0bc2, + 0xa6a: 0x0bd2, 0xa6b: 0x0bd6, 0xa6c: 0x0be6, 0xa6d: 0x0bde, 0xa6e: 0x0be2, 0xa6f: 0x0bea, + 0xa70: 0x0bee, 0xa71: 0x0bf2, 0xa72: 0x0bfe, 0xa73: 
0x0c02, 0xa74: 0x0c1a, 0xa75: 0x0c22, + 0xa76: 0x0c32, 0xa77: 0x0c46, 0xa78: 0x16bd, 0xa79: 0x0c42, 0xa7a: 0x0c36, 0xa7b: 0x0c4e, + 0xa7c: 0x0c56, 0xa7d: 0x0c6a, 0xa7e: 0x16c2, 0xa7f: 0x0c72, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c66, 0xa81: 0x0c5e, 0xa82: 0x05f2, 0xa83: 0x0c7a, 0xa84: 0x0c82, 0xa85: 0x0c8a, + 0xa86: 0x0c7e, 0xa87: 0x05f6, 0xa88: 0x0c9a, 0xa89: 0x0ca2, 0xa8a: 0x16c7, 0xa8b: 0x0cce, + 0xa8c: 0x0d02, 0xa8d: 0x0cde, 0xa8e: 0x0602, 0xa8f: 0x0cea, 0xa90: 0x05fe, 0xa91: 0x05fa, + 0xa92: 0x07c6, 0xa93: 0x07ca, 0xa94: 0x0d06, 0xa95: 0x0cee, 0xa96: 0x11ae, 0xa97: 0x0666, + 0xa98: 0x0d12, 0xa99: 0x0d16, 0xa9a: 0x0d1a, 0xa9b: 0x0d2e, 0xa9c: 0x0d26, 0xa9d: 0x16e0, + 0xa9e: 0x0606, 0xa9f: 0x0d42, 0xaa0: 0x0d36, 0xaa1: 0x0d52, 0xaa2: 0x0d5a, 0xaa3: 0x16ea, + 0xaa4: 0x0d5e, 0xaa5: 0x0d4a, 0xaa6: 0x0d66, 0xaa7: 0x060a, 0xaa8: 0x0d6a, 0xaa9: 0x0d6e, + 0xaaa: 0x0d72, 0xaab: 0x0d7e, 0xaac: 0x16ef, 0xaad: 0x0d86, 0xaae: 0x060e, 0xaaf: 0x0d92, + 0xab0: 0x16f4, 0xab1: 0x0d96, 0xab2: 0x0612, 0xab3: 0x0da2, 0xab4: 0x0dae, 0xab5: 0x0dba, + 0xab6: 0x0dbe, 0xab7: 0x16f9, 0xab8: 0x1690, 0xab9: 0x16fe, 0xaba: 0x0dde, 0xabb: 0x1703, + 0xabc: 0x0dea, 0xabd: 0x0df2, 0xabe: 0x0de2, 0xabf: 0x0dfe, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0e, 0xac1: 0x0e1e, 0xac2: 0x0e12, 0xac3: 0x0e16, 0xac4: 0x0e22, 0xac5: 0x0e26, + 0xac6: 0x1708, 0xac7: 0x0e0a, 0xac8: 0x0e3e, 0xac9: 0x0e42, 0xaca: 0x0616, 0xacb: 0x0e56, + 0xacc: 0x0e52, 0xacd: 0x170d, 0xace: 0x0e36, 0xacf: 0x0e72, 0xad0: 0x1712, 0xad1: 0x1717, + 0xad2: 0x0e76, 0xad3: 0x0e8a, 0xad4: 0x0e86, 0xad5: 0x0e82, 0xad6: 0x061a, 0xad7: 0x0e8e, + 0xad8: 0x0e9e, 0xad9: 0x0e9a, 0xada: 0x0ea6, 0xadb: 0x1654, 0xadc: 0x0eb6, 0xadd: 0x171c, + 0xade: 0x0ec2, 0xadf: 0x1726, 0xae0: 0x0ed6, 0xae1: 0x0ee2, 0xae2: 0x0ef6, 0xae3: 0x172b, + 0xae4: 0x0f0a, 0xae5: 0x0f0e, 0xae6: 0x1730, 0xae7: 0x1735, 0xae8: 0x0f2a, 0xae9: 0x0f3a, + 0xaea: 0x061e, 0xaeb: 0x0f3e, 0xaec: 0x0622, 0xaed: 0x0622, 0xaee: 0x0f56, 0xaef: 0x0f5a, + 0xaf0: 0x0f62, 0xaf1: 0x0f66, 0xaf2: 0x0f72, 0xaf3: 0x0626, 0xaf4: 0x0f8a, 0xaf5: 0x173a, + 0xaf6: 0x0fa6, 0xaf7: 0x173f, 0xaf8: 0x0fb2, 0xaf9: 0x16a4, 0xafa: 0x0fc2, 0xafb: 0x1744, + 0xafc: 0x1749, 0xafd: 0x174e, 0xafe: 0x062a, 0xaff: 0x062e, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ffa, 0xb01: 0x1758, 0xb02: 0x1753, 0xb03: 0x175d, 0xb04: 0x1762, 0xb05: 0x1002, + 0xb06: 0x1006, 0xb07: 0x1006, 0xb08: 0x100e, 0xb09: 0x0636, 0xb0a: 0x1012, 0xb0b: 0x063a, + 0xb0c: 0x063e, 0xb0d: 0x176c, 0xb0e: 0x1026, 0xb0f: 0x102e, 0xb10: 0x103a, 0xb11: 0x0642, + 0xb12: 0x1771, 0xb13: 0x105e, 0xb14: 0x1776, 0xb15: 0x177b, 0xb16: 0x107e, 0xb17: 0x1096, + 0xb18: 0x0646, 0xb19: 0x109e, 0xb1a: 0x10a2, 0xb1b: 0x10a6, 0xb1c: 0x1780, 0xb1d: 0x1785, + 0xb1e: 0x1785, 0xb1f: 0x10be, 0xb20: 0x064a, 0xb21: 0x178a, 0xb22: 0x10d2, 0xb23: 0x10d6, + 0xb24: 0x064e, 0xb25: 0x178f, 0xb26: 0x10f2, 0xb27: 0x0652, 0xb28: 0x1102, 0xb29: 0x10fa, + 0xb2a: 0x110a, 0xb2b: 0x1799, 0xb2c: 0x1122, 0xb2d: 0x0656, 0xb2e: 0x112e, 0xb2f: 0x1136, + 0xb30: 0x1146, 0xb31: 0x065a, 0xb32: 0x17a3, 0xb33: 0x17a8, 0xb34: 0x065e, 0xb35: 0x17ad, + 0xb36: 0x115e, 0xb37: 0x17b2, 0xb38: 0x116a, 0xb39: 0x1176, 0xb3a: 0x117e, 0xb3b: 0x17b7, + 0xb3c: 0x17bc, 0xb3d: 0x1192, 0xb3e: 0x17c1, 0xb3f: 0x119a, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16d1, 0xb41: 0x0662, 0xb42: 0x11b2, 0xb43: 0x11b6, 0xb44: 0x066a, 0xb45: 0x11ba, + 0xb46: 0x0a36, 0xb47: 0x17c6, 0xb48: 0x17cb, 0xb49: 0x16d6, 0xb4a: 0x16db, 0xb4b: 0x11da, + 0xb4c: 0x11de, 0xb4d: 0x13f6, 0xb4e: 0x066e, 0xb4f: 0x120a, 0xb50: 0x1206, 0xb51: 0x120e, + 0xb52: 0x0842, 
0xb53: 0x1212, 0xb54: 0x1216, 0xb55: 0x121a, 0xb56: 0x1222, 0xb57: 0x17d0, + 0xb58: 0x121e, 0xb59: 0x1226, 0xb5a: 0x123a, 0xb5b: 0x123e, 0xb5c: 0x122a, 0xb5d: 0x1242, + 0xb5e: 0x1256, 0xb5f: 0x126a, 0xb60: 0x1236, 0xb61: 0x124a, 0xb62: 0x124e, 0xb63: 0x1252, + 0xb64: 0x17d5, 0xb65: 0x17df, 0xb66: 0x17da, 0xb67: 0x0672, 0xb68: 0x1272, 0xb69: 0x1276, + 0xb6a: 0x127e, 0xb6b: 0x17f3, 0xb6c: 0x1282, 0xb6d: 0x17e4, 0xb6e: 0x0676, 0xb6f: 0x067a, + 0xb70: 0x17e9, 0xb71: 0x17ee, 0xb72: 0x067e, 0xb73: 0x12a2, 0xb74: 0x12a6, 0xb75: 0x12aa, + 0xb76: 0x12ae, 0xb77: 0x12ba, 0xb78: 0x12b6, 0xb79: 0x12c2, 0xb7a: 0x12be, 0xb7b: 0x12ce, + 0xb7c: 0x12c6, 0xb7d: 0x12ca, 0xb7e: 0x12d2, 0xb7f: 0x0682, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12da, 0xb81: 0x12de, 0xb82: 0x0686, 0xb83: 0x12ee, 0xb84: 0x12f2, 0xb85: 0x17f8, + 0xb86: 0x12fe, 0xb87: 0x1302, 0xb88: 0x068a, 0xb89: 0x130e, 0xb8a: 0x05be, 0xb8b: 0x17fd, + 0xb8c: 0x1802, 0xb8d: 0x068e, 0xb8e: 0x0692, 0xb8f: 0x133a, 0xb90: 0x1352, 0xb91: 0x136e, + 0xb92: 0x137e, 0xb93: 0x1807, 0xb94: 0x1392, 0xb95: 0x1396, 0xb96: 0x13ae, 0xb97: 0x13ba, + 0xb98: 0x1811, 0xb99: 0x1663, 0xb9a: 0x13c6, 0xb9b: 0x13c2, 0xb9c: 0x13ce, 0xb9d: 0x1668, + 0xb9e: 0x13da, 0xb9f: 0x13e6, 0xba0: 0x1816, 0xba1: 0x181b, 0xba2: 0x1426, 0xba3: 0x1432, + 0xba4: 0x143a, 0xba5: 0x1820, 0xba6: 0x143e, 0xba7: 0x146a, 0xba8: 0x1476, 0xba9: 0x147a, + 0xbaa: 0x1472, 0xbab: 0x1486, 0xbac: 0x148a, 0xbad: 0x1825, 0xbae: 0x1496, 0xbaf: 0x0696, + 0xbb0: 0x149e, 0xbb1: 0x182a, 0xbb2: 0x069a, 0xbb3: 0x14d6, 0xbb4: 0x0ac6, 0xbb5: 0x14ee, + 0xbb6: 0x182f, 0xbb7: 0x1839, 0xbb8: 0x069e, 0xbb9: 0x06a2, 0xbba: 0x1516, 0xbbb: 0x183e, + 0xbbc: 0x06a6, 0xbbd: 0x1843, 0xbbe: 0x152e, 0xbbf: 0x152e, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1536, 0xbc1: 0x1848, 0xbc2: 0x154e, 0xbc3: 0x06aa, 0xbc4: 0x155e, 0xbc5: 0x156a, + 0xbc6: 0x1572, 0xbc7: 0x157a, 0xbc8: 0x06ae, 0xbc9: 0x184d, 0xbca: 0x158e, 0xbcb: 0x15aa, + 0xbcc: 0x15b6, 0xbcd: 0x06b2, 0xbce: 0x06b6, 0xbcf: 0x15ba, 0xbd0: 0x1852, 0xbd1: 0x06ba, + 0xbd2: 0x1857, 0xbd3: 0x185c, 0xbd4: 0x1861, 0xbd5: 0x15de, 0xbd6: 0x06be, 0xbd7: 0x15f2, + 0xbd8: 0x15fa, 0xbd9: 0x15fe, 0xbda: 0x1606, 0xbdb: 0x160e, 0xbdc: 0x1616, 0xbdd: 0x186b, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16b: 0x64, 0x16c: 0x0e, 0x16d: 0x65, 0x16e: 0x66, 0x16f: 0x67, + 0x170: 0x68, 0x173: 0x69, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x6a, 0x183: 0x6b, 0x184: 0x6c, 0x186: 0x6d, 0x187: 0x6e, + 0x188: 0x6f, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x70, 0x18c: 0x71, + 0x1ab: 0x72, + 0x1b3: 0x73, 0x1b5: 0x74, 0x1b7: 0x75, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x76, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x77, 0x1c5: 0x78, + 0x1c9: 0x79, 0x1cc: 0x7a, 0x1cd: 0x7b, + // Block 0x8, offset 0x200 + 0x219: 0x7c, 0x21a: 0x7d, 0x21b: 0x7e, + 0x220: 0x7f, 0x223: 0x80, 0x224: 0x81, 0x225: 0x82, 0x226: 0x83, 0x227: 0x84, + 0x22a: 0x85, 0x22b: 0x86, 0x22f: 0x87, + 0x230: 0x88, 0x231: 0x89, 0x232: 0x8a, 0x233: 0x8b, 0x234: 0x8c, 0x235: 0x8d, 0x236: 0x8e, 0x237: 0x88, + 0x238: 0x89, 0x239: 0x8a, 0x23a: 0x8b, 0x23b: 0x8c, 0x23c: 0x8d, 0x23d: 0x8e, 0x23e: 0x88, 0x23f: 0x89, + // Block 0x9, offset 0x240 + 0x240: 0x8a, 0x241: 0x8b, 0x242: 0x8c, 0x243: 0x8d, 0x244: 0x8e, 0x245: 0x88, 0x246: 0x89, 0x247: 0x8a, + 0x248: 0x8b, 0x249: 0x8c, 0x24a: 0x8d, 0x24b: 0x8e, 0x24c: 0x88, 0x24d: 0x89, 0x24e: 0x8a, 0x24f: 0x8b, + 0x250: 0x8c, 0x251: 0x8d, 0x252: 0x8e, 0x253: 0x88, 0x254: 0x89, 0x255: 0x8a, 0x256: 0x8b, 0x257: 0x8c, + 0x258: 0x8d, 0x259: 0x8e, 0x25a: 0x88, 0x25b: 0x89, 0x25c: 0x8a, 0x25d: 0x8b, 0x25e: 0x8c, 0x25f: 0x8d, + 0x260: 0x8e, 0x261: 0x88, 0x262: 0x89, 0x263: 0x8a, 0x264: 0x8b, 0x265: 0x8c, 0x266: 0x8d, 0x267: 0x8e, + 0x268: 0x88, 0x269: 0x89, 0x26a: 0x8a, 0x26b: 0x8b, 0x26c: 0x8c, 0x26d: 0x8d, 0x26e: 0x8e, 0x26f: 0x88, + 0x270: 0x89, 0x271: 0x8a, 0x272: 0x8b, 0x273: 0x8c, 0x274: 0x8d, 0x275: 0x8e, 0x276: 0x88, 0x277: 0x89, + 0x278: 0x8a, 0x279: 0x8b, 0x27a: 0x8c, 0x27b: 0x8d, 0x27c: 0x8e, 0x27d: 0x88, 0x27e: 0x89, 0x27f: 0x8a, + // Block 0xa, offset 0x280 + 0x280: 0x8b, 0x281: 0x8c, 0x282: 0x8d, 0x283: 0x8e, 0x284: 0x88, 0x285: 0x89, 0x286: 0x8a, 0x287: 0x8b, + 0x288: 0x8c, 0x289: 0x8d, 0x28a: 0x8e, 0x28b: 0x88, 0x28c: 0x89, 0x28d: 0x8a, 0x28e: 0x8b, 0x28f: 0x8c, + 0x290: 0x8d, 0x291: 0x8e, 0x292: 0x88, 0x293: 0x89, 0x294: 0x8a, 0x295: 0x8b, 0x296: 0x8c, 0x297: 0x8d, + 0x298: 0x8e, 0x299: 0x88, 0x29a: 0x89, 0x29b: 0x8a, 0x29c: 0x8b, 0x29d: 0x8c, 0x29e: 0x8d, 0x29f: 0x8e, + 0x2a0: 0x88, 0x2a1: 0x89, 
0x2a2: 0x8a, 0x2a3: 0x8b, 0x2a4: 0x8c, 0x2a5: 0x8d, 0x2a6: 0x8e, 0x2a7: 0x88, + 0x2a8: 0x89, 0x2a9: 0x8a, 0x2aa: 0x8b, 0x2ab: 0x8c, 0x2ac: 0x8d, 0x2ad: 0x8e, 0x2ae: 0x88, 0x2af: 0x89, + 0x2b0: 0x8a, 0x2b1: 0x8b, 0x2b2: 0x8c, 0x2b3: 0x8d, 0x2b4: 0x8e, 0x2b5: 0x88, 0x2b6: 0x89, 0x2b7: 0x8a, + 0x2b8: 0x8b, 0x2b9: 0x8c, 0x2ba: 0x8d, 0x2bb: 0x8e, 0x2bc: 0x88, 0x2bd: 0x89, 0x2be: 0x8a, 0x2bf: 0x8b, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8c, 0x2c1: 0x8d, 0x2c2: 0x8e, 0x2c3: 0x88, 0x2c4: 0x89, 0x2c5: 0x8a, 0x2c6: 0x8b, 0x2c7: 0x8c, + 0x2c8: 0x8d, 0x2c9: 0x8e, 0x2ca: 0x88, 0x2cb: 0x89, 0x2cc: 0x8a, 0x2cd: 0x8b, 0x2ce: 0x8c, 0x2cf: 0x8d, + 0x2d0: 0x8e, 0x2d1: 0x88, 0x2d2: 0x89, 0x2d3: 0x8a, 0x2d4: 0x8b, 0x2d5: 0x8c, 0x2d6: 0x8d, 0x2d7: 0x8e, + 0x2d8: 0x88, 0x2d9: 0x89, 0x2da: 0x8a, 0x2db: 0x8b, 0x2dc: 0x8c, 0x2dd: 0x8d, 0x2de: 0x8f, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x90, 0x32d: 0x91, 0x32e: 0x92, + 0x331: 0x93, 0x332: 0x94, 0x333: 0x95, 0x334: 0x96, + 0x338: 0x97, 0x339: 0x98, 0x33a: 0x99, 0x33b: 0x9a, 0x33e: 0x9b, 0x33f: 0x9c, + // Block 0xd, offset 0x340 + 0x347: 0x9d, + 0x34b: 0x9e, 0x34d: 0x9f, + 0x368: 0xa0, 0x36b: 0xa1, + 0x374: 0xa2, + 0x37a: 0xa3, 0x37d: 0xa4, + // Block 0xe, offset 0x380 + 0x381: 0xa5, 0x382: 0xa6, 0x384: 0xa7, 0x385: 0x83, 0x387: 0xa8, + 0x388: 0xa9, 0x38b: 0xaa, 0x38c: 0xab, 0x38d: 0xac, + 0x391: 0xad, 0x392: 0xae, 0x393: 0xaf, 0x396: 0xb0, 0x397: 0xb1, + 0x398: 0x74, 0x39a: 0xb2, 0x39c: 0xb3, + 0x3a0: 0xb4, 0x3a4: 0xb5, 0x3a5: 0xb6, 0x3a7: 0xb7, + 0x3a8: 0xb8, 0x3a9: 0xb9, 0x3aa: 0xba, + 0x3b0: 0x74, 0x3b5: 0xbb, 0x3b6: 0xbc, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xbd, 0x3ec: 0xbe, + 0x3ff: 0xbf, + // Block 0x10, offset 0x400 + 0x432: 0xc0, + // Block 0x11, offset 0x440 + 0x445: 0xc1, 0x446: 0xc2, 0x447: 0xc3, + 0x449: 0xc4, + // Block 0x12, offset 0x480 + 0x480: 0xc5, 0x484: 0xbe, + 0x48b: 0xc6, + 0x4a3: 0xc7, 0x4a5: 0xc8, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc9, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 156 entries, 312 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xd0, 0xd2, 0xd7, 0xe8, 0xf4, 0xf6, 0xfc, 0xfe, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10b, 0x10e, 0x110, 0x113, 0x116, 0x11a, 0x120, 0x122, 0x12b, 0x12d, 0x130, 0x132, 0x13d, 0x141, 0x14f, 0x152, 0x158, 0x15e, 0x169, 0x16d, 0x16f, 0x171, 0x173, 0x175, 0x177, 0x17d, 0x181, 0x183, 0x185, 0x18d, 0x191, 0x194, 0x196, 0x198, 0x19b, 0x19e, 0x1a0, 0x1a2, 0x1a4, 0x1a6, 0x1ac, 0x1af, 0x1b1, 0x1b8, 0x1be, 0x1c4, 0x1cc, 0x1d2, 0x1d8, 0x1de, 0x1e2, 0x1f0, 0x1f9, 0x1fc, 0x1ff, 0x201, 0x204, 0x206, 0x20a, 0x20f, 0x211, 0x213, 0x218, 0x21e, 0x220, 0x222, 0x224, 0x22a, 0x22d, 0x22f, 0x231, 0x237, 0x23a, 0x242, 0x249, 0x24c, 0x24f, 0x251, 0x254, 0x25c, 0x260, 0x267, 0x26a, 0x270, 0x272, 0x275, 0x277, 0x27a, 0x27f, 0x281, 0x283, 0x285, 0x287, 0x289, 0x28c, 0x28e, 0x290, 0x292, 0x294, 0x296, 0x2a3, 0x2ad, 0x2af, 0x2b1, 0x2b7, 0x2b9, 0x2bb, 0x2be} + +// nfcSparseValues: 704 entries, 2816 bytes +var nfcSparseValues = [704]valueRange{ + // Block 0x0, 
offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46f9, lo: 0xa0, hi: 0xa1}, + {value: 0x472b, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4857, lo: 0x8a, hi: 0x8a}, + {value: 0x4875, lo: 0x8b, hi: 0x8b}, + {value: 0x36de, lo: 0x8c, hi: 0x8c}, + {value: 0x36f6, lo: 0x8d, hi: 0x8d}, + {value: 0x488d, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3714, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37bc, lo: 0x90, hi: 0x90}, + {value: 0x37c8, lo: 0x91, hi: 0x91}, + {value: 0x37b6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x382e, lo: 0x97, hi: 0x97}, + {value: 0x37f8, lo: 0x9c, hi: 0x9c}, + {value: 0x37e0, lo: 0x9d, hi: 0x9d}, + {value: 0x380a, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3834, lo: 0xb6, hi: 0xb6}, + {value: 0x383a, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3858, lo: 0xa2, hi: 0xa2}, + {value: 0x385e, lo: 0xa3, hi: 0xa3}, + {value: 0x386a, lo: 0xa4, hi: 0xa4}, + {value: 0x3864, lo: 0xa5, hi: 0xa5}, + {value: 0x3870, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x3882, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3876, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x387c, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 
0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3eef, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ef7, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eff, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x4533, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cab, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x4573, lo: 0x9c, hi: 0x9d}, + {value: 0x4583, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x45ab, lo: 0xb3, hi: 0xb3}, + {value: 0x45b3, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x458b, lo: 0x99, hi: 0x9b}, + {value: 0x45a3, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cc3, lo: 0x88, hi: 0x88}, + {value: 0x2cbb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ccb, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45bb, lo: 0x9c, hi: 0x9c}, + {value: 0x45c3, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cd3, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cdb, lo: 0x8a, hi: 0x8a}, + {value: 0x2ceb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ce3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3f07, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 
0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2cf3, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cfb, lo: 0x87, hi: 0x87}, + {value: 0x2d03, lo: 0x88, hi: 0x88}, + {value: 0x2f67, lo: 0x8a, hi: 0x8a}, + {value: 0x2def, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d0b, lo: 0x8a, hi: 0x8a}, + {value: 0x2d1b, lo: 0x8b, hi: 0x8b}, + {value: 0x2d13, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bdd, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3f0f, lo: 0x9a, hi: 0x9a}, + {value: 0x2f6f, lo: 0x9c, hi: 0x9c}, + {value: 0x2dfa, lo: 0x9d, hi: 0x9d}, + {value: 0x2d23, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x02}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xd0 + {value: 0x0000, lo: 0x01}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd2 + {value: 0x0000, lo: 0x04}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd7 + {value: 0x0000, lo: 0x10}, + {value: 0x264a, lo: 0x83, hi: 0x83}, + {value: 0x2651, lo: 0x8d, hi: 0x8d}, + {value: 0x2658, lo: 0x92, hi: 0x92}, + {value: 0x265f, lo: 0x97, hi: 0x97}, + {value: 0x2666, lo: 0x9c, hi: 0x9c}, + {value: 0x2643, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4a9b, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4aa4, lo: 0xb5, hi: 0xb5}, + {value: 0x45cb, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45d3, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe8 + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4aad, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x2674, lo: 0x93, hi: 0x93}, + {value: 0x267b, lo: 0x9d, hi: 0x9d}, + {value: 0x2682, lo: 0xa2, hi: 0xa2}, + {value: 0x2689, lo: 0xa7, hi: 0xa7}, + {value: 0x2690, lo: 0xac, hi: 0xac}, + {value: 0x266d, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf4 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf6 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d2b, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfc + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 
0x8d}, + // Block 0x2a, offset 0xfe + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x108 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x94}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x110 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x113 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x116 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x120 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + // Block 0x37, offset 0x122 + {value: 0x0000, lo: 0x08}, + {value: 0x2d73, lo: 0x80, hi: 0x80}, + {value: 0x2d7b, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d83, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x12b + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x12d + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x130 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x132 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x13d + {value: 0x0004, lo: 0x03}, + {value: 0x0436, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x141 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, 
hi: 0xb0}, + // Block 0x3e, offset 0x14f + {value: 0x4292, lo: 0x02}, + {value: 0x01bb, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3f, offset 0x152 + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bd0, lo: 0x9a, hi: 0x9b}, + {value: 0x3bde, lo: 0xae, hi: 0xae}, + // Block 0x40, offset 0x158 + {value: 0x000e, lo: 0x05}, + {value: 0x3be5, lo: 0x8d, hi: 0x8e}, + {value: 0x3bec, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x41, offset 0x15e + {value: 0x63f1, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3bfa, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3c01, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3c08, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3c0f, lo: 0xa4, hi: 0xa5}, + {value: 0x3c16, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x42, offset 0x169 + {value: 0x0007, lo: 0x03}, + {value: 0x3c7f, lo: 0xa0, hi: 0xa1}, + {value: 0x3ca9, lo: 0xa2, hi: 0xa3}, + {value: 0x3cd3, lo: 0xaa, hi: 0xad}, + // Block 0x43, offset 0x16d + {value: 0x0004, lo: 0x01}, + {value: 0x048e, lo: 0xa9, hi: 0xaa}, + // Block 0x44, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x44f4, lo: 0x9c, hi: 0x9c}, + // Block 0x45, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x46, offset 0x173 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x47, offset 0x175 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x48, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xaf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x03}, + {value: 0x4ab6, lo: 0xb3, hi: 0xb3}, + {value: 0x4ab6, lo: 0xb5, hi: 0xb6}, + {value: 0x4ab6, lo: 0xba, hi: 0xbf}, + // Block 0x4a, offset 0x181 + {value: 0x0000, lo: 0x01}, + {value: 0x4ab6, lo: 0x8f, hi: 0xa3}, + // Block 0x4b, offset 0x183 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4c, offset 0x185 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4e, offset 0x191 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x4f, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x50, offset 0x196 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x51, offset 0x198 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x52, offset 0x19b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x53, offset 0x19e + {value: 
0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x54, offset 0x1a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x55, offset 0x1a2 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x56, offset 0x1a4 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x57, offset 0x1a6 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x58, offset 0x1ac + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x59, offset 0x1af + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x5a, offset 0x1b1 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5b, offset 0x1b8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5c, offset 0x1be + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5d, offset 0x1c4 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5e, offset 0x1cc + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5f, offset 0x1d2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x60, offset 0x1d8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x61, offset 0x1de + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x62, offset 0x1e2 + {value: 0x0006, lo: 0x0d}, + {value: 0x43a7, lo: 0x9d, hi: 0x9d}, + {value: 0x8116, lo: 0x9e, hi: 0x9e}, + {value: 0x4419, lo: 0x9f, hi: 0x9f}, + {value: 0x4407, lo: 0xaa, hi: 0xab}, + {value: 0x450b, lo: 0xac, hi: 0xac}, + {value: 0x4513, lo: 0xad, hi: 0xad}, + {value: 0x435f, lo: 0xae, hi: 0xb1}, + {value: 0x437d, lo: 0xb2, hi: 0xb4}, + {value: 0x4395, lo: 0xb5, hi: 0xb6}, + {value: 0x43a1, lo: 0xb8, hi: 0xb8}, + {value: 0x43ad, lo: 0xb9, hi: 0xbb}, + {value: 0x43c5, lo: 0xbc, hi: 0xbc}, + {value: 0x43cb, lo: 0xbe, hi: 0xbe}, + // Block 0x63, offset 0x1f0 + {value: 0x0006, lo: 0x08}, + {value: 0x43d1, 
lo: 0x80, hi: 0x81}, + {value: 0x43dd, lo: 0x83, hi: 0x84}, + {value: 0x43ef, lo: 0x86, hi: 0x89}, + {value: 0x4413, lo: 0x8a, hi: 0x8a}, + {value: 0x438f, lo: 0x8b, hi: 0x8b}, + {value: 0x4377, lo: 0x8c, hi: 0x8c}, + {value: 0x43bf, lo: 0x8d, hi: 0x8d}, + {value: 0x43e9, lo: 0x8e, hi: 0x8e}, + // Block 0x64, offset 0x1f9 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x65, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x66, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x67, offset 0x201 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x68, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x69, offset 0x206 + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0xa0, hi: 0xa6}, + {value: 0x812e, lo: 0xa7, hi: 0xad}, + {value: 0x8133, lo: 0xae, hi: 0xaf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6b, offset 0x20f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6c, offset 0x211 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x04}, + {value: 0x4ab6, lo: 0x9e, hi: 0x9f}, + {value: 0x4ab6, lo: 0xa3, hi: 0xa3}, + {value: 0x4ab6, lo: 0xa5, hi: 0xa6}, + {value: 0x4ab6, lo: 0xaa, hi: 0xaf}, + // Block 0x6e, offset 0x218 + {value: 0x0000, lo: 0x05}, + {value: 0x4ab6, lo: 0x82, hi: 0x87}, + {value: 0x4ab6, lo: 0x8a, hi: 0x8f}, + {value: 0x4ab6, lo: 0x92, hi: 0x97}, + {value: 0x4ab6, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6f, offset 0x21e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x70, offset 0x220 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x71, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x72, offset 0x224 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x22a + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x74, offset 0x22d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x75, offset 0x22f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x76, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x77, offset 0x237 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x78, offset 0x23a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x424f, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4259, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x4263, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 
0xba}, + // Block 0x79, offset 0x242 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d8b, lo: 0xae, hi: 0xae}, + {value: 0x2d95, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x7a, offset 0x249 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7b, offset 0x24c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7c, offset 0x24f + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x7d, offset 0x251 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7e, offset 0x254 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d9f, lo: 0x8b, hi: 0x8b}, + {value: 0x2da9, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7f, offset 0x25c + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x80, offset 0x260 + {value: 0x6b4d, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2dbd, lo: 0xbb, hi: 0xbb}, + {value: 0x2db3, lo: 0xbc, hi: 0xbd}, + {value: 0x2dc7, lo: 0xbe, hi: 0xbe}, + // Block 0x81, offset 0x267 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x82, offset 0x26a + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dd1, lo: 0xba, hi: 0xba}, + {value: 0x2ddb, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x83, offset 0x270 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x84, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x85, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x86, offset 0x277 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x87, offset 0x27a + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2de5, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x88, offset 0x27f + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x89, offset 0x281 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8a, offset 0x283 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8b, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8c, offset 0x287 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8d, offset 0x289 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8e, offset 0x28c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8f, offset 0x28e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x90, offset 0x290 + {value: 
0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x91, offset 0x292 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x92, offset 0x294 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x93, offset 0x296 + {value: 0x0000, lo: 0x0c}, + {value: 0x45e3, lo: 0x9e, hi: 0x9e}, + {value: 0x45ed, lo: 0x9f, hi: 0x9f}, + {value: 0x4621, lo: 0xa0, hi: 0xa0}, + {value: 0x462f, lo: 0xa1, hi: 0xa1}, + {value: 0x463d, lo: 0xa2, hi: 0xa2}, + {value: 0x464b, lo: 0xa3, hi: 0xa3}, + {value: 0x4659, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x94, offset 0x2a3 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x45f7, lo: 0xbb, hi: 0xbb}, + {value: 0x4601, lo: 0xbc, hi: 0xbc}, + {value: 0x4667, lo: 0xbd, hi: 0xbd}, + {value: 0x4683, lo: 0xbe, hi: 0xbe}, + {value: 0x4675, lo: 0xbf, hi: 0xbf}, + // Block 0x95, offset 0x2ad + {value: 0x0000, lo: 0x01}, + {value: 0x4691, lo: 0x80, hi: 0x80}, + // Block 0x96, offset 0x2af + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x97, offset 0x2b1 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0x98, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0x99, offset 0x2b9 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0x9a, offset 0x2bb + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x9b, offset 0x2be + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 18768 bytes (18.33 KiB). Checksum: c51186dd2412943d. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. +var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f86, 0xc1: 0x2f8b, 0xc2: 0x469f, 0xc3: 0x2f90, 0xc4: 0x46ae, 0xc5: 0x46b3, + 0xc6: 0xa000, 0xc7: 0x46bd, 0xc8: 0x2ff9, 0xc9: 0x2ffe, 0xca: 0x46c2, 0xcb: 0x3012, + 0xcc: 0x3085, 0xcd: 0x308a, 0xce: 0x308f, 0xcf: 0x46d6, 0xd1: 0x311b, + 0xd2: 0x313e, 0xd3: 0x3143, 0xd4: 0x46e0, 0xd5: 0x46e5, 0xd6: 0x46f4, + 0xd8: 0xa000, 0xd9: 0x31ca, 0xda: 0x31cf, 0xdb: 0x31d4, 0xdc: 0x4726, 0xdd: 0x324c, + 0xe0: 0x3292, 0xe1: 0x3297, 0xe2: 0x4730, 0xe3: 0x329c, + 0xe4: 0x473f, 0xe5: 0x4744, 0xe6: 0xa000, 0xe7: 0x474e, 0xe8: 0x3305, 0xe9: 0x330a, + 0xea: 0x4753, 0xeb: 0x331e, 0xec: 0x3396, 0xed: 0x339b, 0xee: 0x33a0, 0xef: 0x4767, + 0xf1: 0x342c, 0xf2: 0x344f, 0xf3: 0x3454, 0xf4: 0x4771, 0xf5: 0x4776, + 0xf6: 0x4785, 0xf8: 0xa000, 0xf9: 0x34e0, 0xfa: 0x34e5, 0xfb: 0x34ea, + 0xfc: 0x47b7, 0xfd: 0x3567, 0xff: 0x3580, + // Block 0x4, offset 0x100 + 0x100: 0x2f95, 0x101: 0x32a1, 0x102: 0x46a4, 0x103: 0x4735, 0x104: 0x2fb3, 0x105: 0x32bf, + 0x106: 0x2fc7, 0x107: 0x32d3, 0x108: 0x2fcc, 0x109: 0x32d8, 0x10a: 0x2fd1, 0x10b: 0x32dd, + 0x10c: 0x2fd6, 0x10d: 0x32e2, 0x10e: 0x2fe0, 0x10f: 0x32ec, + 0x112: 0x46c7, 0x113: 0x4758, 0x114: 0x3008, 0x115: 0x3314, 0x116: 0x300d, 0x117: 0x3319, + 0x118: 0x302b, 0x119: 0x3337, 0x11a: 0x301c, 0x11b: 0x3328, 0x11c: 0x3044, 0x11d: 0x3350, + 0x11e: 0x304e, 0x11f: 0x335a, 0x120: 0x3053, 0x121: 0x335f, 0x122: 0x305d, 0x123: 0x3369, + 0x124: 0x3062, 0x125: 0x336e, 0x128: 0x3094, 0x129: 0x33a5, + 0x12a: 0x3099, 0x12b: 0x33aa, 0x12c: 0x309e, 0x12d: 0x33af, 0x12e: 0x30c1, 0x12f: 0x33cd, + 0x130: 0x30a3, 0x132: 0x1960, 0x133: 0x19ed, 0x134: 0x30cb, 0x135: 0x33d7, + 0x136: 0x30df, 0x137: 0x33f0, 0x139: 0x30e9, 0x13a: 0x33fa, 0x13b: 0x30f3, + 0x13c: 0x3404, 0x13d: 0x30ee, 0x13e: 0x33ff, 0x13f: 0x1bb2, + // Block 0x5, offset 0x140 + 0x140: 0x1c3a, 0x143: 0x3116, 0x144: 0x3427, 0x145: 0x312f, + 0x146: 0x3440, 0x147: 0x3125, 0x148: 0x3436, 0x149: 0x1c62, + 0x14c: 0x46ea, 0x14d: 0x477b, 0x14e: 0x3148, 0x14f: 0x3459, 0x150: 0x3152, 0x151: 0x3463, + 0x154: 0x3170, 0x155: 0x3481, 0x156: 0x3189, 0x157: 0x349a, + 0x158: 0x317a, 0x159: 0x348b, 0x15a: 0x470d, 0x15b: 0x479e, 0x15c: 0x3193, 0x15d: 0x34a4, + 0x15e: 0x31a2, 0x15f: 0x34b3, 0x160: 0x4712, 0x161: 0x47a3, 0x162: 0x31bb, 0x163: 0x34d1, + 0x164: 0x31ac, 0x165: 0x34c2, 0x168: 0x471c, 0x169: 0x47ad, + 0x16a: 0x4721, 0x16b: 0x47b2, 0x16c: 0x31d9, 0x16d: 0x34ef, 
0x16e: 0x31e3, 0x16f: 0x34f9, + 0x170: 0x31e8, 0x171: 0x34fe, 0x172: 0x3206, 0x173: 0x351c, 0x174: 0x3229, 0x175: 0x353f, + 0x176: 0x3251, 0x177: 0x356c, 0x178: 0x3265, 0x179: 0x3274, 0x17a: 0x3594, 0x17b: 0x327e, + 0x17c: 0x359e, 0x17d: 0x3283, 0x17e: 0x35a3, 0x17f: 0x00a7, + // Block 0x6, offset 0x180 + 0x184: 0x2e05, 0x185: 0x2e0b, + 0x186: 0x2e11, 0x187: 0x1975, 0x188: 0x1978, 0x189: 0x1a0e, 0x18a: 0x198d, 0x18b: 0x1990, + 0x18c: 0x1a44, 0x18d: 0x2f9f, 0x18e: 0x32ab, 0x18f: 0x30ad, 0x190: 0x33b9, 0x191: 0x3157, + 0x192: 0x3468, 0x193: 0x31ed, 0x194: 0x3503, 0x195: 0x39e6, 0x196: 0x3b75, 0x197: 0x39df, + 0x198: 0x3b6e, 0x199: 0x39ed, 0x19a: 0x3b7c, 0x19b: 0x39d8, 0x19c: 0x3b67, + 0x19e: 0x38c7, 0x19f: 0x3a56, 0x1a0: 0x38c0, 0x1a1: 0x3a4f, 0x1a2: 0x35ca, 0x1a3: 0x35dc, + 0x1a6: 0x3058, 0x1a7: 0x3364, 0x1a8: 0x30d5, 0x1a9: 0x33e6, + 0x1aa: 0x4703, 0x1ab: 0x4794, 0x1ac: 0x39a7, 0x1ad: 0x3b36, 0x1ae: 0x35ee, 0x1af: 0x35f4, + 0x1b0: 0x33dc, 0x1b1: 0x1945, 0x1b2: 0x1948, 0x1b3: 0x19d5, 0x1b4: 0x303f, 0x1b5: 0x334b, + 0x1b8: 0x3111, 0x1b9: 0x3422, 0x1ba: 0x38ce, 0x1bb: 0x3a5d, + 0x1bc: 0x35c4, 0x1bd: 0x35d6, 0x1be: 0x35d0, 0x1bf: 0x35e2, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2fa4, 0x1c1: 0x32b0, 0x1c2: 0x2fa9, 0x1c3: 0x32b5, 0x1c4: 0x3021, 0x1c5: 0x332d, + 0x1c6: 0x3026, 0x1c7: 0x3332, 0x1c8: 0x30b2, 0x1c9: 0x33be, 0x1ca: 0x30b7, 0x1cb: 0x33c3, + 0x1cc: 0x315c, 0x1cd: 0x346d, 0x1ce: 0x3161, 0x1cf: 0x3472, 0x1d0: 0x317f, 0x1d1: 0x3490, + 0x1d2: 0x3184, 0x1d3: 0x3495, 0x1d4: 0x31f2, 0x1d5: 0x3508, 0x1d6: 0x31f7, 0x1d7: 0x350d, + 0x1d8: 0x319d, 0x1d9: 0x34ae, 0x1da: 0x31b6, 0x1db: 0x34cc, + 0x1de: 0x3071, 0x1df: 0x337d, + 0x1e6: 0x46a9, 0x1e7: 0x473a, 0x1e8: 0x46d1, 0x1e9: 0x4762, + 0x1ea: 0x3976, 0x1eb: 0x3b05, 0x1ec: 0x3953, 0x1ed: 0x3ae2, 0x1ee: 0x46ef, 0x1ef: 0x4780, + 0x1f0: 0x396f, 0x1f1: 0x3afe, 0x1f2: 0x325b, 0x1f3: 0x3576, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x49c5, 0x241: 0x49ca, 0x242: 0x9933, 0x243: 0x49cf, 0x244: 0x4a88, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 
0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x0173, + 0x27a: 0x42bc, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x4271, 0x285: 0x4492, + 0x286: 0x3600, 0x287: 0x00ce, 0x288: 0x361e, 0x289: 0x362a, 0x28a: 0x363c, + 0x28c: 0x365a, 0x28e: 0x366c, 0x28f: 0x368a, 0x290: 0x3e1f, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x364e, 0x2ab: 0x367e, 0x2ac: 0x4815, 0x2ad: 0x36ae, 0x2ae: 0x483f, 0x2af: 0x36c0, + 0x2b0: 0x3e87, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4857, 0x2cb: 0x4875, + 0x2cc: 0x36de, 0x2cd: 0x36f6, 0x2ce: 0x488d, 0x2d0: 0x01c1, 0x2d1: 0x01d3, + 0x2d2: 0x01af, 0x2d3: 0x4323, 0x2d4: 0x4329, 0x2d5: 0x01fd, 0x2d6: 0x01eb, + 0x2f0: 0x01d9, 0x2f1: 0x01ee, 0x2f2: 0x01f1, 0x2f4: 0x018b, 0x2f5: 0x01ca, + 0x2f9: 0x01a9, + // Block 0xc, offset 0x300 + 0x300: 0x3738, 0x301: 0x3744, 0x303: 0x3732, + 0x306: 0xa000, 0x307: 0x3720, + 0x30c: 0x3774, 0x30d: 0x375c, 0x30e: 0x3786, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3768, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37ec, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x374a, 0x342: 0x37ce, + 0x350: 0x3726, 0x351: 0x37aa, + 0x352: 0x372c, 0x353: 0x37b0, 0x356: 0x373e, 0x357: 0x37c2, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3840, 0x35b: 0x3846, 0x35c: 0x3750, 0x35d: 0x37d4, + 0x35e: 0x3756, 0x35f: 0x37da, 0x362: 0x3762, 0x363: 0x37e6, + 0x364: 0x376e, 0x365: 0x37f2, 0x366: 0x377a, 0x367: 0x37fe, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x384c, 0x36b: 0x3852, 0x36c: 0x37a4, 0x36d: 0x3828, 0x36e: 0x3780, 0x36f: 0x3804, + 0x370: 0x378c, 0x371: 0x3810, 0x372: 0x3792, 0x373: 0x3816, 0x374: 0x3798, 0x375: 0x381c, + 0x378: 0x379e, 0x379: 0x3822, + // Block 0xe, offset 0x380 + 0x387: 0x1d67, + 0x391: 0x812e, + 0x392: 0x8133, 0x393: 0x8133, 0x394: 0x8133, 0x395: 0x8133, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x812f, 0x39b: 0x812e, 0x39c: 0x8133, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x8133, 0x3a0: 0x8133, 0x3a1: 0x8133, 0x3a2: 0x812e, 0x3a3: 0x812e, + 0x3a4: 0x812e, 0x3a5: 0x812e, 0x3a6: 0x812e, 0x3a7: 0x812e, 0x3a8: 0x8133, 0x3a9: 0x8133, + 0x3aa: 0x812e, 0x3ab: 0x8133, 0x3ac: 0x8133, 0x3ad: 0x812f, 0x3ae: 0x8132, 0x3af: 0x8133, + 0x3b0: 0x8106, 0x3b1: 0x8107, 0x3b2: 0x8108, 0x3b3: 0x8109, 0x3b4: 0x810a, 0x3b5: 0x810b, + 0x3b6: 0x810c, 0x3b7: 0x810d, 0x3b8: 0x810e, 0x3b9: 0x810f, 0x3ba: 0x810f, 0x3bb: 0x8110, + 0x3bc: 0x8111, 0x3bd: 0x8112, 0x3bf: 0x8113, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8117, + 0x3cc: 0x8118, 0x3cd: 0x8119, 0x3ce: 0x811a, 0x3cf: 0x811b, 0x3d0: 0x811c, 0x3d1: 0x811d, + 0x3d2: 0x811e, 0x3d3: 0x9933, 0x3d4: 0x9933, 0x3d5: 0x992e, 0x3d6: 0x812e, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x812e, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x812e, + 0x3f0: 0x811f, 0x3f5: 0x1d8a, + 0x3f6: 0x2019, 0x3f7: 0x2055, 0x3f8: 0x2050, + // Block 0x10, offset 0x400 + 0x413: 0x812e, 0x414: 0x8133, 0x415: 0x8133, 0x416: 0x8133, 0x417: 0x8133, + 0x418: 0x8133, 0x419: 0x8133, 0x41a: 0x8133, 0x41b: 0x8133, 0x41c: 0x8133, 0x41d: 0x8133, + 0x41e: 0x8133, 0x41f: 
0x8133, 0x420: 0x8133, 0x421: 0x8133, 0x423: 0x812e, + 0x424: 0x8133, 0x425: 0x8133, 0x426: 0x812e, 0x427: 0x8133, 0x428: 0x8133, 0x429: 0x812e, + 0x42a: 0x8133, 0x42b: 0x8133, 0x42c: 0x8133, 0x42d: 0x812e, 0x42e: 0x812e, 0x42f: 0x812e, + 0x430: 0x8117, 0x431: 0x8118, 0x432: 0x8119, 0x433: 0x8133, 0x434: 0x8133, 0x435: 0x8133, + 0x436: 0x812e, 0x437: 0x8133, 0x438: 0x8133, 0x439: 0x812e, 0x43a: 0x812e, 0x43b: 0x8133, + 0x43c: 0x8133, 0x43d: 0x8133, 0x43e: 0x8133, 0x43f: 0x8133, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d33, 0x447: 0xa000, 0x448: 0x2d3b, 0x449: 0xa000, 0x44a: 0x2d43, 0x44b: 0xa000, + 0x44c: 0x2d4b, 0x44d: 0xa000, 0x44e: 0x2d53, 0x451: 0xa000, + 0x452: 0x2d5b, + 0x474: 0x8103, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d63, + 0x47c: 0xa000, 0x47d: 0x2d6b, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0416, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x041a, 0x495: 0x041e, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0426, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x042a, 0x49d: 0x01c1, + 0x49e: 0x01c4, 0x49f: 0x01c7, 0x4a0: 0x01fd, 0x4a1: 0x0200, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01c1, 0x4a7: 0x01c4, 0x4a8: 0x01ee, 0x4a9: 0x01fd, + 0x4aa: 0x0200, + 0x4b8: 0x020f, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042e, 0x4e8: 0x016d, 0x4e9: 0x0128, + 0x4ea: 0x0432, 0x4eb: 0x0170, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x0422, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x0161, 0x4fd: 0x0164, 0x4fe: 0x0167, 0x4ff: 0x01d3, + // Block 0x14, offset 0x500 + 0x500: 0x8133, 0x501: 0x8133, 0x502: 0x812e, 0x503: 0x8133, 0x504: 0x8133, 0x505: 0x8133, + 0x506: 0x8133, 0x507: 0x8133, 0x508: 0x8133, 0x509: 0x8133, 0x50a: 0x812e, 0x50b: 0x8133, + 0x50c: 0x8133, 0x50d: 0x8136, 0x50e: 0x812b, 0x50f: 0x812e, 0x510: 0x812a, 0x511: 0x8133, + 0x512: 0x8133, 0x513: 0x8133, 0x514: 0x8133, 0x515: 0x8133, 0x516: 0x8133, 0x517: 0x8133, + 0x518: 0x8133, 0x519: 0x8133, 0x51a: 0x8133, 0x51b: 0x8133, 0x51c: 0x8133, 0x51d: 0x8133, + 0x51e: 0x8133, 0x51f: 0x8133, 0x520: 0x8133, 0x521: 0x8133, 0x522: 0x8133, 0x523: 0x8133, + 0x524: 0x8133, 0x525: 0x8133, 0x526: 0x8133, 0x527: 0x8133, 0x528: 0x8133, 0x529: 0x8133, + 0x52a: 0x8133, 0x52b: 0x8133, 0x52c: 0x8133, 0x52d: 0x8133, 0x52e: 0x8133, 0x52f: 0x8133, + 0x530: 0x8133, 0x531: 0x8133, 0x532: 0x8133, 0x533: 0x8133, 0x534: 0x8133, 0x535: 0x8133, + 0x536: 0x8134, 0x537: 0x8132, 0x538: 0x8132, 0x539: 0x812e, 0x53b: 0x8133, + 0x53c: 0x8135, 0x53d: 0x812e, 0x53e: 0x8133, 0x53f: 0x812e, + // Block 0x15, offset 0x540 + 0x540: 0x2fae, 0x541: 0x32ba, 0x542: 0x2fb8, 0x543: 0x32c4, 0x544: 0x2fbd, 0x545: 0x32c9, + 0x546: 0x2fc2, 0x547: 0x32ce, 0x548: 0x38e3, 0x549: 0x3a72, 0x54a: 0x2fdb, 0x54b: 0x32e7, + 0x54c: 0x2fe5, 0x54d: 0x32f1, 0x54e: 0x2ff4, 0x54f: 0x3300, 0x550: 0x2fea, 0x551: 0x32f6, + 0x552: 0x2fef, 0x553: 0x32fb, 0x554: 0x3906, 0x555: 0x3a95, 0x556: 0x390d, 0x557: 0x3a9c, + 0x558: 0x3030, 0x559: 0x333c, 0x55a: 
0x3035, 0x55b: 0x3341, 0x55c: 0x391b, 0x55d: 0x3aaa, + 0x55e: 0x303a, 0x55f: 0x3346, 0x560: 0x3049, 0x561: 0x3355, 0x562: 0x3067, 0x563: 0x3373, + 0x564: 0x3076, 0x565: 0x3382, 0x566: 0x306c, 0x567: 0x3378, 0x568: 0x307b, 0x569: 0x3387, + 0x56a: 0x3080, 0x56b: 0x338c, 0x56c: 0x30c6, 0x56d: 0x33d2, 0x56e: 0x3922, 0x56f: 0x3ab1, + 0x570: 0x30d0, 0x571: 0x33e1, 0x572: 0x30da, 0x573: 0x33eb, 0x574: 0x30e4, 0x575: 0x33f5, + 0x576: 0x46db, 0x577: 0x476c, 0x578: 0x3929, 0x579: 0x3ab8, 0x57a: 0x30fd, 0x57b: 0x340e, + 0x57c: 0x30f8, 0x57d: 0x3409, 0x57e: 0x3102, 0x57f: 0x3413, + // Block 0x16, offset 0x580 + 0x580: 0x3107, 0x581: 0x3418, 0x582: 0x310c, 0x583: 0x341d, 0x584: 0x3120, 0x585: 0x3431, + 0x586: 0x312a, 0x587: 0x343b, 0x588: 0x3139, 0x589: 0x344a, 0x58a: 0x3134, 0x58b: 0x3445, + 0x58c: 0x394c, 0x58d: 0x3adb, 0x58e: 0x395a, 0x58f: 0x3ae9, 0x590: 0x3961, 0x591: 0x3af0, + 0x592: 0x3968, 0x593: 0x3af7, 0x594: 0x3166, 0x595: 0x3477, 0x596: 0x316b, 0x597: 0x347c, + 0x598: 0x3175, 0x599: 0x3486, 0x59a: 0x4708, 0x59b: 0x4799, 0x59c: 0x39ae, 0x59d: 0x3b3d, + 0x59e: 0x318e, 0x59f: 0x349f, 0x5a0: 0x3198, 0x5a1: 0x34a9, 0x5a2: 0x4717, 0x5a3: 0x47a8, + 0x5a4: 0x39b5, 0x5a5: 0x3b44, 0x5a6: 0x39bc, 0x5a7: 0x3b4b, 0x5a8: 0x39c3, 0x5a9: 0x3b52, + 0x5aa: 0x31a7, 0x5ab: 0x34b8, 0x5ac: 0x31b1, 0x5ad: 0x34c7, 0x5ae: 0x31c5, 0x5af: 0x34db, + 0x5b0: 0x31c0, 0x5b1: 0x34d6, 0x5b2: 0x3201, 0x5b3: 0x3517, 0x5b4: 0x3210, 0x5b5: 0x3526, + 0x5b6: 0x320b, 0x5b7: 0x3521, 0x5b8: 0x39ca, 0x5b9: 0x3b59, 0x5ba: 0x39d1, 0x5bb: 0x3b60, + 0x5bc: 0x3215, 0x5bd: 0x352b, 0x5be: 0x321a, 0x5bf: 0x3530, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x321f, 0x5c1: 0x3535, 0x5c2: 0x3224, 0x5c3: 0x353a, 0x5c4: 0x3233, 0x5c5: 0x3549, + 0x5c6: 0x322e, 0x5c7: 0x3544, 0x5c8: 0x3238, 0x5c9: 0x3553, 0x5ca: 0x323d, 0x5cb: 0x3558, + 0x5cc: 0x3242, 0x5cd: 0x355d, 0x5ce: 0x3260, 0x5cf: 0x357b, 0x5d0: 0x3279, 0x5d1: 0x3599, + 0x5d2: 0x3288, 0x5d3: 0x35a8, 0x5d4: 0x328d, 0x5d5: 0x35ad, 0x5d6: 0x3391, 0x5d7: 0x34bd, + 0x5d8: 0x354e, 0x5d9: 0x358a, 0x5da: 0x1be6, 0x5db: 0x42ee, + 0x5e0: 0x46b8, 0x5e1: 0x4749, 0x5e2: 0x2f9a, 0x5e3: 0x32a6, + 0x5e4: 0x388f, 0x5e5: 0x3a1e, 0x5e6: 0x3888, 0x5e7: 0x3a17, 0x5e8: 0x389d, 0x5e9: 0x3a2c, + 0x5ea: 0x3896, 0x5eb: 0x3a25, 0x5ec: 0x38d5, 0x5ed: 0x3a64, 0x5ee: 0x38ab, 0x5ef: 0x3a3a, + 0x5f0: 0x38a4, 0x5f1: 0x3a33, 0x5f2: 0x38b9, 0x5f3: 0x3a48, 0x5f4: 0x38b2, 0x5f5: 0x3a41, + 0x5f6: 0x38dc, 0x5f7: 0x3a6b, 0x5f8: 0x46cc, 0x5f9: 0x475d, 0x5fa: 0x3017, 0x5fb: 0x3323, + 0x5fc: 0x3003, 0x5fd: 0x330f, 0x5fe: 0x38f1, 0x5ff: 0x3a80, + // Block 0x18, offset 0x600 + 0x600: 0x38ea, 0x601: 0x3a79, 0x602: 0x38ff, 0x603: 0x3a8e, 0x604: 0x38f8, 0x605: 0x3a87, + 0x606: 0x3914, 0x607: 0x3aa3, 0x608: 0x30a8, 0x609: 0x33b4, 0x60a: 0x30bc, 0x60b: 0x33c8, + 0x60c: 0x46fe, 0x60d: 0x478f, 0x60e: 0x314d, 0x60f: 0x345e, 0x610: 0x3937, 0x611: 0x3ac6, + 0x612: 0x3930, 0x613: 0x3abf, 0x614: 0x3945, 0x615: 0x3ad4, 0x616: 0x393e, 0x617: 0x3acd, + 0x618: 0x39a0, 0x619: 0x3b2f, 0x61a: 0x3984, 0x61b: 0x3b13, 0x61c: 0x397d, 0x61d: 0x3b0c, + 0x61e: 0x3992, 0x61f: 0x3b21, 0x620: 0x398b, 0x621: 0x3b1a, 0x622: 0x3999, 0x623: 0x3b28, + 0x624: 0x31fc, 0x625: 0x3512, 0x626: 0x31de, 0x627: 0x34f4, 0x628: 0x39fb, 0x629: 0x3b8a, + 0x62a: 0x39f4, 0x62b: 0x3b83, 0x62c: 0x3a09, 0x62d: 0x3b98, 0x62e: 0x3a02, 0x62f: 0x3b91, + 0x630: 0x3a10, 0x631: 0x3b9f, 0x632: 0x3247, 0x633: 0x3562, 0x634: 0x326f, 0x635: 0x358f, + 0x636: 0x326a, 0x637: 0x3585, 0x638: 0x3256, 0x639: 0x3571, + // Block 0x19, offset 0x640 + 0x640: 0x481b, 0x641: 0x4821, 0x642: 0x4935, 0x643: 0x494d, 
0x644: 0x493d, 0x645: 0x4955, + 0x646: 0x4945, 0x647: 0x495d, 0x648: 0x47c1, 0x649: 0x47c7, 0x64a: 0x48a5, 0x64b: 0x48bd, + 0x64c: 0x48ad, 0x64d: 0x48c5, 0x64e: 0x48b5, 0x64f: 0x48cd, 0x650: 0x482d, 0x651: 0x4833, + 0x652: 0x3dcf, 0x653: 0x3ddf, 0x654: 0x3dd7, 0x655: 0x3de7, + 0x658: 0x47cd, 0x659: 0x47d3, 0x65a: 0x3cff, 0x65b: 0x3d0f, 0x65c: 0x3d07, 0x65d: 0x3d17, + 0x660: 0x4845, 0x661: 0x484b, 0x662: 0x4965, 0x663: 0x497d, + 0x664: 0x496d, 0x665: 0x4985, 0x666: 0x4975, 0x667: 0x498d, 0x668: 0x47d9, 0x669: 0x47df, + 0x66a: 0x48d5, 0x66b: 0x48ed, 0x66c: 0x48dd, 0x66d: 0x48f5, 0x66e: 0x48e5, 0x66f: 0x48fd, + 0x670: 0x485d, 0x671: 0x4863, 0x672: 0x3e2f, 0x673: 0x3e47, 0x674: 0x3e37, 0x675: 0x3e4f, + 0x676: 0x3e3f, 0x677: 0x3e57, 0x678: 0x47e5, 0x679: 0x47eb, 0x67a: 0x3d2f, 0x67b: 0x3d47, + 0x67c: 0x3d37, 0x67d: 0x3d4f, 0x67e: 0x3d3f, 0x67f: 0x3d57, + // Block 0x1a, offset 0x680 + 0x680: 0x4869, 0x681: 0x486f, 0x682: 0x3e5f, 0x683: 0x3e6f, 0x684: 0x3e67, 0x685: 0x3e77, + 0x688: 0x47f1, 0x689: 0x47f7, 0x68a: 0x3d5f, 0x68b: 0x3d6f, + 0x68c: 0x3d67, 0x68d: 0x3d77, 0x690: 0x487b, 0x691: 0x4881, + 0x692: 0x3e97, 0x693: 0x3eaf, 0x694: 0x3e9f, 0x695: 0x3eb7, 0x696: 0x3ea7, 0x697: 0x3ebf, + 0x699: 0x47fd, 0x69b: 0x3d7f, 0x69d: 0x3d87, + 0x69f: 0x3d8f, 0x6a0: 0x4893, 0x6a1: 0x4899, 0x6a2: 0x4995, 0x6a3: 0x49ad, + 0x6a4: 0x499d, 0x6a5: 0x49b5, 0x6a6: 0x49a5, 0x6a7: 0x49bd, 0x6a8: 0x4803, 0x6a9: 0x4809, + 0x6aa: 0x4905, 0x6ab: 0x491d, 0x6ac: 0x490d, 0x6ad: 0x4925, 0x6ae: 0x4915, 0x6af: 0x492d, + 0x6b0: 0x480f, 0x6b1: 0x4335, 0x6b2: 0x36a8, 0x6b3: 0x433b, 0x6b4: 0x4839, 0x6b5: 0x4341, + 0x6b6: 0x36ba, 0x6b7: 0x4347, 0x6b8: 0x36d8, 0x6b9: 0x434d, 0x6ba: 0x36f0, 0x6bb: 0x4353, + 0x6bc: 0x4887, 0x6bd: 0x4359, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3db7, 0x6c1: 0x3dbf, 0x6c2: 0x419b, 0x6c3: 0x41b9, 0x6c4: 0x41a5, 0x6c5: 0x41c3, + 0x6c6: 0x41af, 0x6c7: 0x41cd, 0x6c8: 0x3cef, 0x6c9: 0x3cf7, 0x6ca: 0x40e7, 0x6cb: 0x4105, + 0x6cc: 0x40f1, 0x6cd: 0x410f, 0x6ce: 0x40fb, 0x6cf: 0x4119, 0x6d0: 0x3dff, 0x6d1: 0x3e07, + 0x6d2: 0x41d7, 0x6d3: 0x41f5, 0x6d4: 0x41e1, 0x6d5: 0x41ff, 0x6d6: 0x41eb, 0x6d7: 0x4209, + 0x6d8: 0x3d1f, 0x6d9: 0x3d27, 0x6da: 0x4123, 0x6db: 0x4141, 0x6dc: 0x412d, 0x6dd: 0x414b, + 0x6de: 0x4137, 0x6df: 0x4155, 0x6e0: 0x3ed7, 0x6e1: 0x3edf, 0x6e2: 0x4213, 0x6e3: 0x4231, + 0x6e4: 0x421d, 0x6e5: 0x423b, 0x6e6: 0x4227, 0x6e7: 0x4245, 0x6e8: 0x3d97, 0x6e9: 0x3d9f, + 0x6ea: 0x415f, 0x6eb: 0x417d, 0x6ec: 0x4169, 0x6ed: 0x4187, 0x6ee: 0x4173, 0x6ef: 0x4191, + 0x6f0: 0x369c, 0x6f1: 0x3696, 0x6f2: 0x3da7, 0x6f3: 0x36a2, 0x6f4: 0x3daf, + 0x6f6: 0x4827, 0x6f7: 0x3dc7, 0x6f8: 0x360c, 0x6f9: 0x3606, 0x6fa: 0x35fa, 0x6fb: 0x4305, + 0x6fc: 0x3612, 0x6fd: 0x429e, 0x6fe: 0x01d6, 0x6ff: 0x429e, + // Block 0x1c, offset 0x700 + 0x700: 0x42b7, 0x701: 0x4499, 0x702: 0x3def, 0x703: 0x36b4, 0x704: 0x3df7, + 0x706: 0x4851, 0x707: 0x3e0f, 0x708: 0x3618, 0x709: 0x430b, 0x70a: 0x3624, 0x70b: 0x4311, + 0x70c: 0x3630, 0x70d: 0x44a0, 0x70e: 0x44a7, 0x70f: 0x44ae, 0x710: 0x36cc, 0x711: 0x36c6, + 0x712: 0x3e17, 0x713: 0x44fb, 0x716: 0x36d2, 0x717: 0x3e27, + 0x718: 0x3648, 0x719: 0x3642, 0x71a: 0x3636, 0x71b: 0x4317, 0x71d: 0x44b5, + 0x71e: 0x44bc, 0x71f: 0x44c3, 0x720: 0x3702, 0x721: 0x36fc, 0x722: 0x3e7f, 0x723: 0x4503, + 0x724: 0x36e4, 0x725: 0x36ea, 0x726: 0x3708, 0x727: 0x3e8f, 0x728: 0x3678, 0x729: 0x3672, + 0x72a: 0x3666, 0x72b: 0x4323, 0x72c: 0x3660, 0x72d: 0x448b, 0x72e: 0x4492, 0x72f: 0x0081, + 0x732: 0x3ec7, 0x733: 0x370e, 0x734: 0x3ecf, + 0x736: 0x489f, 0x737: 0x3ee7, 0x738: 0x3654, 0x739: 0x431d, 0x73a: 0x3684, 
0x73b: 0x432f, + 0x73c: 0x3690, 0x73d: 0x4271, 0x73e: 0x42a3, + // Block 0x1d, offset 0x740 + 0x740: 0x1bde, 0x741: 0x1be2, 0x742: 0x0047, 0x743: 0x1c5a, 0x745: 0x1bee, + 0x746: 0x1bf2, 0x747: 0x00e9, 0x749: 0x1c5e, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1993, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x19a5, 0x761: 0x1bce, 0x762: 0x19ae, + 0x764: 0x0075, 0x766: 0x01bb, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42e9, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0218, + 0x776: 0x021b, 0x777: 0x021e, 0x778: 0x0221, 0x779: 0x0093, 0x77b: 0x1b9e, + 0x77c: 0x01eb, 0x77d: 0x01c4, 0x77e: 0x017c, 0x77f: 0x01a3, + // Block 0x1e, offset 0x780 + 0x780: 0x0466, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x2234, 0x791: 0x2240, + 0x792: 0x22f4, 0x793: 0x221c, 0x794: 0x22a0, 0x795: 0x2228, 0x796: 0x22a6, 0x797: 0x22be, + 0x798: 0x22ca, 0x799: 0x222e, 0x79a: 0x22d0, 0x79b: 0x223a, 0x79c: 0x22c4, 0x79d: 0x22d6, + 0x79e: 0x22dc, 0x79f: 0x1cc2, 0x7a0: 0x0053, 0x7a1: 0x195d, 0x7a2: 0x1baa, 0x7a3: 0x1966, + 0x7a4: 0x006d, 0x7a5: 0x19b1, 0x7a6: 0x1bd6, 0x7a7: 0x1d4e, 0x7a8: 0x1969, 0x7a9: 0x0071, + 0x7aa: 0x19bd, 0x7ab: 0x1bda, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19ea, 0x7b2: 0x1c1e, 0x7b3: 0x19f3, 0x7b4: 0x00ad, 0x7b5: 0x1a68, + 0x7b6: 0x1c52, 0x7b7: 0x1d62, 0x7b8: 0x19f6, 0x7b9: 0x00b1, 0x7ba: 0x1a6b, 0x7bb: 0x1c56, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c1d, 0x7c3: 0xa000, 0x7c4: 0x3c24, 0x7c5: 0xa000, + 0x7c7: 0x3c2b, 0x7c8: 0xa000, 0x7c9: 0x3c32, + 0x7cd: 0xa000, + 0x7e0: 0x2f7c, 0x7e1: 0xa000, 0x7e2: 0x3c40, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c39, 0x7ee: 0x2f77, 0x7ef: 0x2f81, + 0x7f0: 0x3c47, 0x7f1: 0x3c4e, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c55, 0x7f5: 0x3c5c, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c63, 0x7f9: 0x3c6a, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c71, 0x801: 0x3c78, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c8d, 0x805: 0x3c94, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c9b, 0x809: 0x3ca2, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3cb7, 0x82d: 0x3cbe, 0x82e: 0x3cc5, 0x82f: 0x3ccc, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1885, + 0x86a: 0x1888, 0x86b: 0x188b, 0x86c: 0x188e, 0x86d: 0x1891, 0x86e: 0x1894, 0x86f: 0x1897, + 0x870: 0x189a, 0x871: 0x189d, 0x872: 0x18a0, 0x873: 0x18a9, 0x874: 0x1a6e, 0x875: 0x1a72, + 0x876: 0x1a76, 0x877: 0x1a7a, 0x878: 0x1a7e, 0x879: 0x1a82, 0x87a: 0x1a86, 0x87b: 0x1a8a, + 0x87c: 0x1a8e, 0x87d: 0x1c86, 0x87e: 0x1c8b, 0x87f: 0x1c90, + // Block 0x22, offset 0x880 + 0x880: 0x1c95, 0x881: 0x1c9a, 0x882: 0x1c9f, 0x883: 0x1ca4, 0x884: 0x1ca9, 0x885: 0x1cae, + 0x886: 0x1cb3, 0x887: 0x1cb8, 0x888: 0x1882, 0x889: 0x18a6, 0x88a: 0x18ca, 0x88b: 0x18ee, + 0x88c: 0x1912, 0x88d: 0x191b, 0x88e: 0x1921, 0x88f: 0x1927, 0x890: 0x192d, 0x891: 0x1b66, + 0x892: 0x1b6a, 0x893: 0x1b6e, 0x894: 0x1b72, 0x895: 0x1b76, 0x896: 0x1b7a, 0x897: 0x1b7e, + 
0x898: 0x1b82, 0x899: 0x1b86, 0x89a: 0x1b8a, 0x89b: 0x1b8e, 0x89c: 0x1afa, 0x89d: 0x1afe, + 0x89e: 0x1b02, 0x89f: 0x1b06, 0x8a0: 0x1b0a, 0x8a1: 0x1b0e, 0x8a2: 0x1b12, 0x8a3: 0x1b16, + 0x8a4: 0x1b1a, 0x8a5: 0x1b1e, 0x8a6: 0x1b22, 0x8a7: 0x1b26, 0x8a8: 0x1b2a, 0x8a9: 0x1b2e, + 0x8aa: 0x1b32, 0x8ab: 0x1b36, 0x8ac: 0x1b3a, 0x8ad: 0x1b3e, 0x8ae: 0x1b42, 0x8af: 0x1b46, + 0x8b0: 0x1b4a, 0x8b1: 0x1b4e, 0x8b2: 0x1b52, 0x8b3: 0x1b56, 0x8b4: 0x1b5a, 0x8b5: 0x1b5e, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06c2, 0x8c1: 0x06e6, 0x8c2: 0x06f2, 0x8c3: 0x0702, 0x8c4: 0x070a, 0x8c5: 0x0716, + 0x8c6: 0x071e, 0x8c7: 0x0726, 0x8c8: 0x0732, 0x8c9: 0x0786, 0x8ca: 0x079e, 0x8cb: 0x07ae, + 0x8cc: 0x07be, 0x8cd: 0x07ce, 0x8ce: 0x07de, 0x8cf: 0x07fe, 0x8d0: 0x0802, 0x8d1: 0x0806, + 0x8d2: 0x083a, 0x8d3: 0x0862, 0x8d4: 0x0872, 0x8d5: 0x087a, 0x8d6: 0x087e, 0x8d7: 0x088a, + 0x8d8: 0x08a6, 0x8d9: 0x08aa, 0x8da: 0x08c2, 0x8db: 0x08c6, 0x8dc: 0x08ce, 0x8dd: 0x08de, + 0x8de: 0x097a, 0x8df: 0x098e, 0x8e0: 0x09ce, 0x8e1: 0x09e2, 0x8e2: 0x09ea, 0x8e3: 0x09ee, + 0x8e4: 0x09fe, 0x8e5: 0x0a1a, 0x8e6: 0x0a46, 0x8e7: 0x0a52, 0x8e8: 0x0a72, 0x8e9: 0x0a7e, + 0x8ea: 0x0a82, 0x8eb: 0x0a86, 0x8ec: 0x0a9e, 0x8ed: 0x0aa2, 0x8ee: 0x0ace, 0x8ef: 0x0ada, + 0x8f0: 0x0ae2, 0x8f1: 0x0aea, 0x8f2: 0x0afa, 0x8f3: 0x0b02, 0x8f4: 0x0b0a, 0x8f5: 0x0b36, + 0x8f6: 0x0b3a, 0x8f7: 0x0b42, 0x8f8: 0x0b46, 0x8f9: 0x0b4e, 0x8fa: 0x0b56, 0x8fb: 0x0b66, + 0x8fc: 0x0b82, 0x8fd: 0x0bfa, 0x8fe: 0x0c0e, 0x8ff: 0x0c12, + // Block 0x24, offset 0x900 + 0x900: 0x0c92, 0x901: 0x0c96, 0x902: 0x0caa, 0x903: 0x0cae, 0x904: 0x0cb6, 0x905: 0x0cbe, + 0x906: 0x0cc6, 0x907: 0x0cd2, 0x908: 0x0cfa, 0x909: 0x0d0a, 0x90a: 0x0d1e, 0x90b: 0x0d8e, + 0x90c: 0x0d9a, 0x90d: 0x0daa, 0x90e: 0x0db6, 0x90f: 0x0dc2, 0x910: 0x0dca, 0x911: 0x0dce, + 0x912: 0x0dd2, 0x913: 0x0dd6, 0x914: 0x0dda, 0x915: 0x0e92, 0x916: 0x0eda, 0x917: 0x0ee6, + 0x918: 0x0eea, 0x919: 0x0eee, 0x91a: 0x0ef2, 0x91b: 0x0efa, 0x91c: 0x0efe, 0x91d: 0x0f12, + 0x91e: 0x0f2e, 0x91f: 0x0f36, 0x920: 0x0f76, 0x921: 0x0f7a, 0x922: 0x0f82, 0x923: 0x0f86, + 0x924: 0x0f8e, 0x925: 0x0f92, 0x926: 0x0fb6, 0x927: 0x0fba, 0x928: 0x0fd6, 0x929: 0x0fda, + 0x92a: 0x0fde, 0x92b: 0x0fe2, 0x92c: 0x0ff6, 0x92d: 0x101a, 0x92e: 0x101e, 0x92f: 0x1022, + 0x930: 0x1046, 0x931: 0x1086, 0x932: 0x108a, 0x933: 0x10aa, 0x934: 0x10ba, 0x935: 0x10c2, + 0x936: 0x10e2, 0x937: 0x1106, 0x938: 0x114a, 0x939: 0x1152, 0x93a: 0x1166, 0x93b: 0x1172, + 0x93c: 0x117a, 0x93d: 0x1182, 0x93e: 0x1186, 0x93f: 0x118a, + // Block 0x25, offset 0x940 + 0x940: 0x11a2, 0x941: 0x11a6, 0x942: 0x11c2, 0x943: 0x11ca, 0x944: 0x11d2, 0x945: 0x11d6, + 0x946: 0x11e2, 0x947: 0x11ea, 0x948: 0x11ee, 0x949: 0x11f2, 0x94a: 0x11fa, 0x94b: 0x11fe, + 0x94c: 0x129e, 0x94d: 0x12b2, 0x94e: 0x12e6, 0x94f: 0x12ea, 0x950: 0x12f2, 0x951: 0x131e, + 0x952: 0x1326, 0x953: 0x132e, 0x954: 0x1336, 0x955: 0x1372, 0x956: 0x1376, 0x957: 0x137e, + 0x958: 0x1382, 0x959: 0x1386, 0x95a: 0x13b2, 0x95b: 0x13b6, 0x95c: 0x13be, 0x95d: 0x13d2, + 0x95e: 0x13d6, 0x95f: 0x13f2, 0x960: 0x13fa, 0x961: 0x13fe, 0x962: 0x1422, 0x963: 0x1442, + 0x964: 0x1456, 0x965: 0x145a, 0x966: 0x1462, 0x967: 0x148e, 0x968: 0x1492, 0x969: 0x14a2, + 0x96a: 0x14c6, 0x96b: 0x14d2, 0x96c: 0x14e2, 0x96d: 0x14fa, 0x96e: 0x1502, 0x96f: 0x1506, + 0x970: 0x150a, 0x971: 0x150e, 0x972: 0x151a, 0x973: 0x151e, 0x974: 0x1526, 0x975: 0x1542, + 0x976: 0x1546, 0x977: 0x154a, 0x978: 0x1562, 0x979: 
0x1566, 0x97a: 0x156e, 0x97b: 0x1582, + 0x97c: 0x1586, 0x97d: 0x158a, 0x97e: 0x1592, 0x97f: 0x1596, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f1f, 0x98d: 0xa000, 0x98e: 0x3f27, 0x98f: 0xa000, 0x990: 0x3f2f, 0x991: 0xa000, + 0x992: 0x3f37, 0x993: 0xa000, 0x994: 0x3f3f, 0x995: 0xa000, 0x996: 0x3f47, 0x997: 0xa000, + 0x998: 0x3f4f, 0x999: 0xa000, 0x99a: 0x3f57, 0x99b: 0xa000, 0x99c: 0x3f5f, 0x99d: 0xa000, + 0x99e: 0x3f67, 0x99f: 0xa000, 0x9a0: 0x3f6f, 0x9a1: 0xa000, 0x9a2: 0x3f77, + 0x9a4: 0xa000, 0x9a5: 0x3f7f, 0x9a6: 0xa000, 0x9a7: 0x3f87, 0x9a8: 0xa000, 0x9a9: 0x3f8f, + 0x9af: 0xa000, + 0x9b0: 0x3f97, 0x9b1: 0x3f9f, 0x9b2: 0xa000, 0x9b3: 0x3fa7, 0x9b4: 0x3faf, 0x9b5: 0xa000, + 0x9b6: 0x3fb7, 0x9b7: 0x3fbf, 0x9b8: 0xa000, 0x9b9: 0x3fc7, 0x9ba: 0x3fcf, 0x9bb: 0xa000, + 0x9bc: 0x3fd7, 0x9bd: 0x3fdf, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f17, + 0x9d9: 0x9904, 0x9da: 0x9904, 0x9db: 0x42f3, 0x9dc: 0x42f9, 0x9dd: 0xa000, + 0x9de: 0x3fe7, 0x9df: 0x26ba, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3ff7, 0x9ed: 0xa000, 0x9ee: 0x3fff, 0x9ef: 0xa000, + 0x9f0: 0x4007, 0x9f1: 0xa000, 0x9f2: 0x400f, 0x9f3: 0xa000, 0x9f4: 0x4017, 0x9f5: 0xa000, + 0x9f6: 0x401f, 0x9f7: 0xa000, 0x9f8: 0x4027, 0x9f9: 0xa000, 0x9fa: 0x402f, 0x9fb: 0xa000, + 0x9fc: 0x4037, 0x9fd: 0xa000, 0x9fe: 0x403f, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4047, 0xa01: 0xa000, 0xa02: 0x404f, 0xa04: 0xa000, 0xa05: 0x4057, + 0xa06: 0xa000, 0xa07: 0x405f, 0xa08: 0xa000, 0xa09: 0x4067, + 0xa0f: 0xa000, 0xa10: 0x406f, 0xa11: 0x4077, + 0xa12: 0xa000, 0xa13: 0x407f, 0xa14: 0x4087, 0xa15: 0xa000, 0xa16: 0x408f, 0xa17: 0x4097, + 0xa18: 0xa000, 0xa19: 0x409f, 0xa1a: 0x40a7, 0xa1b: 0xa000, 0xa1c: 0x40af, 0xa1d: 0x40b7, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fef, + 0xa37: 0x40bf, 0xa38: 0x40c7, 0xa39: 0x40cf, 0xa3a: 0x40d7, + 0xa3d: 0xa000, 0xa3e: 0x40df, 0xa3f: 0x26cf, + // Block 0x29, offset 0xa40 + 0xa40: 0x036a, 0xa41: 0x032e, 0xa42: 0x0332, 0xa43: 0x0336, 0xa44: 0x037e, 0xa45: 0x033a, + 0xa46: 0x033e, 0xa47: 0x0342, 0xa48: 0x0346, 0xa49: 0x034a, 0xa4a: 0x034e, 0xa4b: 0x0352, + 0xa4c: 0x0356, 0xa4d: 0x035a, 0xa4e: 0x035e, 0xa4f: 0x49d4, 0xa50: 0x49da, 0xa51: 0x49e0, + 0xa52: 0x49e6, 0xa53: 0x49ec, 0xa54: 0x49f2, 0xa55: 0x49f8, 0xa56: 0x49fe, 0xa57: 0x4a04, + 0xa58: 0x4a0a, 0xa59: 0x4a10, 0xa5a: 0x4a16, 0xa5b: 0x4a1c, 0xa5c: 0x4a22, 0xa5d: 0x4a28, + 0xa5e: 0x4a2e, 0xa5f: 0x4a34, 0xa60: 0x4a3a, 0xa61: 0x4a40, 0xa62: 0x4a46, 0xa63: 0x4a4c, + 0xa64: 0x03c6, 0xa65: 0x0362, 0xa66: 0x0366, 0xa67: 0x03ea, 0xa68: 0x03ee, 0xa69: 0x03f2, + 0xa6a: 0x03f6, 0xa6b: 0x03fa, 0xa6c: 0x03fe, 0xa6d: 0x0402, 0xa6e: 0x036e, 0xa6f: 0x0406, + 0xa70: 0x040a, 0xa71: 0x0372, 0xa72: 0x0376, 0xa73: 0x037a, 0xa74: 0x0382, 0xa75: 0x0386, + 0xa76: 0x038a, 0xa77: 0x038e, 0xa78: 0x0392, 0xa79: 0x0396, 0xa7a: 0x039a, 0xa7b: 0x039e, + 0xa7c: 0x03a2, 0xa7d: 0x03a6, 0xa7e: 0x03aa, 0xa7f: 0x03ae, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03b2, 0xa81: 0x03b6, 0xa82: 0x040e, 0xa83: 0x0412, 0xa84: 0x03ba, 0xa85: 0x03be, + 0xa86: 0x03c2, 0xa87: 0x03ca, 0xa88: 0x03ce, 0xa89: 0x03d2, 0xa8a: 0x03d6, 0xa8b: 0x03da, + 0xa8c: 0x03de, 0xa8d: 0x03e2, 0xa8e: 0x03e6, + 0xa92: 0x06c2, 0xa93: 0x071e, 0xa94: 0x06ce, 0xa95: 0x097e, 0xa96: 0x06d2, 0xa97: 0x06ea, + 0xa98: 0x06d6, 0xa99: 0x0f96, 0xa9a: 0x070a, 0xa9b: 0x06de, 0xa9c: 0x06c6, 0xa9d: 0x0a02, + 0xa9e: 0x0992, 0xa9f: 0x0732, + // Block 0x2b, offset 0xac0 + 0xac0: 0x205a, 0xac1: 0x2060, 0xac2: 0x2066, 0xac3: 0x206c, 0xac4: 0x2072, 0xac5: 0x2078, + 0xac6: 0x207e, 
0xac7: 0x2084, 0xac8: 0x208a, 0xac9: 0x2090, 0xaca: 0x2096, 0xacb: 0x209c, + 0xacc: 0x20a2, 0xacd: 0x20a8, 0xace: 0x2733, 0xacf: 0x273c, 0xad0: 0x2745, 0xad1: 0x274e, + 0xad2: 0x2757, 0xad3: 0x2760, 0xad4: 0x2769, 0xad5: 0x2772, 0xad6: 0x277b, 0xad7: 0x278d, + 0xad8: 0x2796, 0xad9: 0x279f, 0xada: 0x27a8, 0xadb: 0x27b1, 0xadc: 0x2784, 0xadd: 0x2bb9, + 0xade: 0x2afa, 0xae0: 0x20ae, 0xae1: 0x20c6, 0xae2: 0x20ba, 0xae3: 0x210e, + 0xae4: 0x20cc, 0xae5: 0x20ea, 0xae6: 0x20b4, 0xae7: 0x20e4, 0xae8: 0x20c0, 0xae9: 0x20f6, + 0xaea: 0x2126, 0xaeb: 0x2144, 0xaec: 0x213e, 0xaed: 0x2132, 0xaee: 0x2180, 0xaef: 0x2114, + 0xaf0: 0x2120, 0xaf1: 0x2138, 0xaf2: 0x212c, 0xaf3: 0x2156, 0xaf4: 0x2102, 0xaf5: 0x214a, + 0xaf6: 0x2174, 0xaf7: 0x215c, 0xaf8: 0x20f0, 0xaf9: 0x20d2, 0xafa: 0x2108, 0xafb: 0x211a, + 0xafc: 0x2150, 0xafd: 0x20d8, 0xafe: 0x217a, 0xaff: 0x20fc, + // Block 0x2c, offset 0xb00 + 0xb00: 0x2162, 0xb01: 0x20de, 0xb02: 0x2168, 0xb03: 0x216e, 0xb04: 0x0932, 0xb05: 0x0b06, + 0xb06: 0x0caa, 0xb07: 0x10ca, + 0xb10: 0x1bca, 0xb11: 0x18ac, + 0xb12: 0x18af, 0xb13: 0x18b2, 0xb14: 0x18b5, 0xb15: 0x18b8, 0xb16: 0x18bb, 0xb17: 0x18be, + 0xb18: 0x18c1, 0xb19: 0x18c4, 0xb1a: 0x18cd, 0xb1b: 0x18d0, 0xb1c: 0x18d3, 0xb1d: 0x18d6, + 0xb1e: 0x18d9, 0xb1f: 0x18dc, 0xb20: 0x0316, 0xb21: 0x031e, 0xb22: 0x0322, 0xb23: 0x032a, + 0xb24: 0x032e, 0xb25: 0x0332, 0xb26: 0x033a, 0xb27: 0x0342, 0xb28: 0x0346, 0xb29: 0x034e, + 0xb2a: 0x0352, 0xb2b: 0x0356, 0xb2c: 0x035a, 0xb2d: 0x035e, 0xb2e: 0x2e2f, 0xb2f: 0x2e37, + 0xb30: 0x2e3f, 0xb31: 0x2e47, 0xb32: 0x2e4f, 0xb33: 0x2e57, 0xb34: 0x2e5f, 0xb35: 0x2e67, + 0xb36: 0x2e77, 0xb37: 0x2e7f, 0xb38: 0x2e87, 0xb39: 0x2e8f, 0xb3a: 0x2e97, 0xb3b: 0x2e9f, + 0xb3c: 0x2eea, 0xb3d: 0x2eb2, 0xb3e: 0x2e6f, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06c2, 0xb41: 0x071e, 0xb42: 0x06ce, 0xb43: 0x097e, 0xb44: 0x0722, 0xb45: 0x07b2, + 0xb46: 0x06ca, 0xb47: 0x07ae, 0xb48: 0x070e, 0xb49: 0x088a, 0xb4a: 0x0d0a, 0xb4b: 0x0e92, + 0xb4c: 0x0dda, 0xb4d: 0x0d1e, 0xb4e: 0x1462, 0xb4f: 0x098e, 0xb50: 0x0cd2, 0xb51: 0x0d4e, + 0xb52: 0x0d0e, 0xb53: 0x104e, 0xb54: 0x08fe, 0xb55: 0x0f06, 0xb56: 0x138a, 0xb57: 0x1062, + 0xb58: 0x0846, 0xb59: 0x1092, 0xb5a: 0x0f9e, 0xb5b: 0x0a1a, 0xb5c: 0x1412, 0xb5d: 0x0782, + 0xb5e: 0x08ae, 0xb5f: 0x0dfa, 0xb60: 0x152a, 0xb61: 0x0746, 0xb62: 0x07d6, 0xb63: 0x0d9e, + 0xb64: 0x06d2, 0xb65: 0x06ea, 0xb66: 0x06d6, 0xb67: 0x0ade, 0xb68: 0x08f2, 0xb69: 0x0882, + 0xb6a: 0x0a5a, 0xb6b: 0x0a4e, 0xb6c: 0x0fee, 0xb6d: 0x0742, 0xb6e: 0x139e, 0xb6f: 0x089e, + 0xb70: 0x09f6, 0xb71: 0x18df, 0xb72: 0x18e2, 0xb73: 0x18e5, 0xb74: 0x18e8, 0xb75: 0x18f1, + 0xb76: 0x18f4, 0xb77: 0x18f7, 0xb78: 0x18fa, 0xb79: 0x18fd, 0xb7a: 0x1900, 0xb7b: 0x1903, + 0xb7c: 0x1906, 0xb7d: 0x1909, 0xb7e: 0x190c, 0xb7f: 0x1915, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1ccc, 0xb81: 0x1cdb, 0xb82: 0x1cea, 0xb83: 0x1cf9, 0xb84: 0x1d08, 0xb85: 0x1d17, + 0xb86: 0x1d26, 0xb87: 0x1d35, 0xb88: 0x1d44, 0xb89: 0x2192, 0xb8a: 0x21a4, 0xb8b: 0x21b6, + 0xb8c: 0x1957, 0xb8d: 0x1c0a, 0xb8e: 0x19d8, 0xb8f: 0x1bae, 0xb90: 0x04ce, 0xb91: 0x04d6, + 0xb92: 0x04de, 0xb93: 0x04e6, 0xb94: 0x04ee, 0xb95: 0x04f2, 0xb96: 0x04f6, 0xb97: 0x04fa, + 0xb98: 0x04fe, 0xb99: 0x0502, 0xb9a: 0x0506, 0xb9b: 0x050a, 0xb9c: 0x050e, 0xb9d: 0x0512, + 0xb9e: 0x0516, 0xb9f: 0x051a, 0xba0: 0x051e, 0xba1: 0x0526, 0xba2: 0x052a, 0xba3: 0x052e, + 0xba4: 0x0532, 0xba5: 0x0536, 0xba6: 0x053a, 0xba7: 0x053e, 0xba8: 0x0542, 0xba9: 0x0546, + 0xbaa: 0x054a, 0xbab: 0x054e, 0xbac: 0x0552, 0xbad: 0x0556, 0xbae: 0x055a, 0xbaf: 0x055e, + 0xbb0: 0x0562, 0xbb1: 0x0566, 0xbb2: 
0x056a, 0xbb3: 0x0572, 0xbb4: 0x057a, 0xbb5: 0x0582, + 0xbb6: 0x0586, 0xbb7: 0x058a, 0xbb8: 0x058e, 0xbb9: 0x0592, 0xbba: 0x0596, 0xbbb: 0x059a, + 0xbbc: 0x059e, 0xbbd: 0x05a2, 0xbbe: 0x05a6, 0xbbf: 0x2700, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b19, 0xbc1: 0x29b5, 0xbc2: 0x2b29, 0xbc3: 0x288d, 0xbc4: 0x2efb, 0xbc5: 0x2897, + 0xbc6: 0x28a1, 0xbc7: 0x2f3f, 0xbc8: 0x29c2, 0xbc9: 0x28ab, 0xbca: 0x28b5, 0xbcb: 0x28bf, + 0xbcc: 0x29e9, 0xbcd: 0x29f6, 0xbce: 0x29cf, 0xbcf: 0x29dc, 0xbd0: 0x2ec0, 0xbd1: 0x2a03, + 0xbd2: 0x2a10, 0xbd3: 0x2bcb, 0xbd4: 0x26c1, 0xbd5: 0x2bde, 0xbd6: 0x2bf1, 0xbd7: 0x2b39, + 0xbd8: 0x2a1d, 0xbd9: 0x2c04, 0xbda: 0x2c17, 0xbdb: 0x2a2a, 0xbdc: 0x28c9, 0xbdd: 0x28d3, + 0xbde: 0x2ece, 0xbdf: 0x2a37, 0xbe0: 0x2b49, 0xbe1: 0x2f0c, 0xbe2: 0x28dd, 0xbe3: 0x28e7, + 0xbe4: 0x2a44, 0xbe5: 0x28f1, 0xbe6: 0x28fb, 0xbe7: 0x26d6, 0xbe8: 0x26dd, 0xbe9: 0x2905, + 0xbea: 0x290f, 0xbeb: 0x2c2a, 0xbec: 0x2a51, 0xbed: 0x2b59, 0xbee: 0x2c3d, 0xbef: 0x2a5e, + 0xbf0: 0x2923, 0xbf1: 0x2919, 0xbf2: 0x2f53, 0xbf3: 0x2a6b, 0xbf4: 0x2c50, 0xbf5: 0x292d, + 0xbf6: 0x2b69, 0xbf7: 0x2937, 0xbf8: 0x2a85, 0xbf9: 0x2941, 0xbfa: 0x2a92, 0xbfb: 0x2f1d, + 0xbfc: 0x2a78, 0xbfd: 0x2b79, 0xbfe: 0x2a9f, 0xbff: 0x26e4, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f2e, 0xc01: 0x294b, 0xc02: 0x2955, 0xc03: 0x2aac, 0xc04: 0x295f, 0xc05: 0x2969, + 0xc06: 0x2973, 0xc07: 0x2b89, 0xc08: 0x2ab9, 0xc09: 0x26eb, 0xc0a: 0x2c63, 0xc0b: 0x2ea7, + 0xc0c: 0x2b99, 0xc0d: 0x2ac6, 0xc0e: 0x2edc, 0xc0f: 0x297d, 0xc10: 0x2987, 0xc11: 0x2ad3, + 0xc12: 0x26f2, 0xc13: 0x2ae0, 0xc14: 0x2ba9, 0xc15: 0x26f9, 0xc16: 0x2c76, 0xc17: 0x2991, + 0xc18: 0x1cbd, 0xc19: 0x1cd1, 0xc1a: 0x1ce0, 0xc1b: 0x1cef, 0xc1c: 0x1cfe, 0xc1d: 0x1d0d, + 0xc1e: 0x1d1c, 0xc1f: 0x1d2b, 0xc20: 0x1d3a, 0xc21: 0x1d49, 0xc22: 0x2198, 0xc23: 0x21aa, + 0xc24: 0x21bc, 0xc25: 0x21c8, 0xc26: 0x21d4, 0xc27: 0x21e0, 0xc28: 0x21ec, 0xc29: 0x21f8, + 0xc2a: 0x2204, 0xc2b: 0x2210, 0xc2c: 0x224c, 0xc2d: 0x2258, 0xc2e: 0x2264, 0xc2f: 0x2270, + 0xc30: 0x227c, 0xc31: 0x1c1a, 0xc32: 0x19cc, 0xc33: 0x1939, 0xc34: 0x1bea, 0xc35: 0x1a4d, + 0xc36: 0x1a5c, 0xc37: 0x19d2, 0xc38: 0x1c02, 0xc39: 0x1c06, 0xc3a: 0x1963, 0xc3b: 0x270e, + 0xc3c: 0x271c, 0xc3d: 0x2707, 0xc3e: 0x2715, 0xc3f: 0x2aed, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a50, 0xc41: 0x1a38, 0xc42: 0x1c66, 0xc43: 0x1a20, 0xc44: 0x19f9, 0xc45: 0x196c, + 0xc46: 0x197b, 0xc47: 0x194b, 0xc48: 0x1bf6, 0xc49: 0x1d58, 0xc4a: 0x1a53, 0xc4b: 0x1a3b, + 0xc4c: 0x1c6a, 0xc4d: 0x1c76, 0xc4e: 0x1a2c, 0xc4f: 0x1a02, 0xc50: 0x195a, 0xc51: 0x1c22, + 0xc52: 0x1bb6, 0xc53: 0x1ba2, 0xc54: 0x1bd2, 0xc55: 0x1c7a, 0xc56: 0x1a2f, 0xc57: 0x19cf, + 0xc58: 0x1a05, 0xc59: 0x19e4, 0xc5a: 0x1a47, 0xc5b: 0x1c7e, 0xc5c: 0x1a32, 0xc5d: 0x19c6, + 0xc5e: 0x1a08, 0xc5f: 0x1c42, 0xc60: 0x1bfa, 0xc61: 0x1a1a, 0xc62: 0x1c2a, 0xc63: 0x1c46, + 0xc64: 0x1bfe, 0xc65: 0x1a1d, 0xc66: 0x1c2e, 0xc67: 0x22ee, 0xc68: 0x2302, 0xc69: 0x199c, + 0xc6a: 0x1c26, 0xc6b: 0x1bba, 0xc6c: 0x1ba6, 0xc6d: 0x1c4e, 0xc6e: 0x2723, 0xc6f: 0x27ba, + 0xc70: 0x1a5f, 0xc71: 0x1a4a, 0xc72: 0x1c82, 0xc73: 0x1a35, 0xc74: 0x1a56, 0xc75: 0x1a3e, + 0xc76: 0x1c6e, 0xc77: 0x1a23, 0xc78: 0x19fc, 0xc79: 0x1987, 0xc7a: 0x1a59, 0xc7b: 0x1a41, + 0xc7c: 0x1c72, 0xc7d: 0x1a26, 0xc7e: 0x19ff, 0xc7f: 0x198a, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c32, 0xc81: 0x1bbe, 0xc82: 0x1d53, 0xc83: 0x193c, 0xc84: 0x19c0, 0xc85: 0x19c3, + 0xc86: 0x22fb, 0xc87: 0x1b9a, 0xc88: 0x19c9, 0xc89: 0x194e, 0xc8a: 0x19e7, 0xc8b: 0x1951, + 0xc8c: 0x19f0, 0xc8d: 0x196f, 0xc8e: 0x1972, 0xc8f: 0x1a0b, 0xc90: 0x1a11, 0xc91: 0x1a14, + 
0xc92: 0x1c36, 0xc93: 0x1a17, 0xc94: 0x1a29, 0xc95: 0x1c3e, 0xc96: 0x1c4a, 0xc97: 0x1996, + 0xc98: 0x1d5d, 0xc99: 0x1bc2, 0xc9a: 0x1999, 0xc9b: 0x1a62, 0xc9c: 0x19ab, 0xc9d: 0x19ba, + 0xc9e: 0x22e8, 0xc9f: 0x22e2, 0xca0: 0x1cc7, 0xca1: 0x1cd6, 0xca2: 0x1ce5, 0xca3: 0x1cf4, + 0xca4: 0x1d03, 0xca5: 0x1d12, 0xca6: 0x1d21, 0xca7: 0x1d30, 0xca8: 0x1d3f, 0xca9: 0x218c, + 0xcaa: 0x219e, 0xcab: 0x21b0, 0xcac: 0x21c2, 0xcad: 0x21ce, 0xcae: 0x21da, 0xcaf: 0x21e6, + 0xcb0: 0x21f2, 0xcb1: 0x21fe, 0xcb2: 0x220a, 0xcb3: 0x2246, 0xcb4: 0x2252, 0xcb5: 0x225e, + 0xcb6: 0x226a, 0xcb7: 0x2276, 0xcb8: 0x2282, 0xcb9: 0x2288, 0xcba: 0x228e, 0xcbb: 0x2294, + 0xcbc: 0x229a, 0xcbd: 0x22ac, 0xcbe: 0x22b2, 0xcbf: 0x1c16, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x137a, 0xcc1: 0x0cfe, 0xcc2: 0x13d6, 0xcc3: 0x13a2, 0xcc4: 0x0e5a, 0xcc5: 0x06ee, + 0xcc6: 0x08e2, 0xcc7: 0x162e, 0xcc8: 0x162e, 0xcc9: 0x0a0e, 0xcca: 0x1462, 0xccb: 0x0946, + 0xccc: 0x0a0a, 0xccd: 0x0bf2, 0xcce: 0x0fd2, 0xccf: 0x1162, 0xcd0: 0x129a, 0xcd1: 0x12d6, + 0xcd2: 0x130a, 0xcd3: 0x141e, 0xcd4: 0x0d76, 0xcd5: 0x0e02, 0xcd6: 0x0eae, 0xcd7: 0x0f46, + 0xcd8: 0x1262, 0xcd9: 0x144a, 0xcda: 0x1576, 0xcdb: 0x0712, 0xcdc: 0x08b6, 0xcdd: 0x0d8a, + 0xcde: 0x0ed2, 0xcdf: 0x1296, 0xce0: 0x15c6, 0xce1: 0x0ab6, 0xce2: 0x0e7a, 0xce3: 0x1286, + 0xce4: 0x131a, 0xce5: 0x0c26, 0xce6: 0x11be, 0xce7: 0x12e2, 0xce8: 0x0b22, 0xce9: 0x0d12, + 0xcea: 0x0e1a, 0xceb: 0x0f1e, 0xcec: 0x142a, 0xced: 0x0752, 0xcee: 0x07ea, 0xcef: 0x0856, + 0xcf0: 0x0c8e, 0xcf1: 0x0d82, 0xcf2: 0x0ece, 0xcf3: 0x0ff2, 0xcf4: 0x117a, 0xcf5: 0x128e, + 0xcf6: 0x12a6, 0xcf7: 0x13ca, 0xcf8: 0x14f2, 0xcf9: 0x15a6, 0xcfa: 0x15c2, 0xcfb: 0x102e, + 0xcfc: 0x106e, 0xcfd: 0x1126, 0xcfe: 0x1246, 0xcff: 0x147e, + // Block 0x34, offset 0xd00 + 0xd00: 0x15ce, 0xd01: 0x134e, 0xd02: 0x09ca, 0xd03: 0x0b3e, 0xd04: 0x10de, 0xd05: 0x119e, + 0xd06: 0x0f02, 0xd07: 0x1036, 0xd08: 0x139a, 0xd09: 0x14ea, 0xd0a: 0x09c6, 0xd0b: 0x0a92, + 0xd0c: 0x0d7a, 0xd0d: 0x0e2e, 0xd0e: 0x0e62, 0xd0f: 0x1116, 0xd10: 0x113e, 0xd11: 0x14aa, + 0xd12: 0x0852, 0xd13: 0x11aa, 0xd14: 0x07f6, 0xd15: 0x07f2, 0xd16: 0x109a, 0xd17: 0x112a, + 0xd18: 0x125e, 0xd19: 0x14b2, 0xd1a: 0x136a, 0xd1b: 0x0c2a, 0xd1c: 0x0d76, 0xd1d: 0x135a, + 0xd1e: 0x06fa, 0xd1f: 0x0a66, 0xd20: 0x0b96, 0xd21: 0x0f32, 0xd22: 0x0fb2, 0xd23: 0x0876, + 0xd24: 0x103e, 0xd25: 0x0762, 0xd26: 0x0b7a, 0xd27: 0x06da, 0xd28: 0x0dee, 0xd29: 0x0ca6, + 0xd2a: 0x1112, 0xd2b: 0x08ca, 0xd2c: 0x09b6, 0xd2d: 0x0ffe, 0xd2e: 0x1266, 0xd2f: 0x133e, + 0xd30: 0x0dba, 0xd31: 0x13fa, 0xd32: 0x0de6, 0xd33: 0x0c3a, 0xd34: 0x121e, 0xd35: 0x0c5a, + 0xd36: 0x0fae, 0xd37: 0x072e, 0xd38: 0x07aa, 0xd39: 0x07ee, 0xd3a: 0x0d56, 0xd3b: 0x10fe, + 0xd3c: 0x11f6, 0xd3d: 0x134a, 0xd3e: 0x145e, 0xd3f: 0x085e, + // Block 0x35, offset 0xd40 + 0xd40: 0x0912, 0xd41: 0x0a1a, 0xd42: 0x0b32, 0xd43: 0x0cc2, 0xd44: 0x0e7e, 0xd45: 0x1042, + 0xd46: 0x149a, 0xd47: 0x157e, 0xd48: 0x15d2, 0xd49: 0x15ea, 0xd4a: 0x083a, 0xd4b: 0x0cf6, + 0xd4c: 0x0da6, 0xd4d: 0x13ee, 0xd4e: 0x0afe, 0xd4f: 0x0bda, 0xd50: 0x0bf6, 0xd51: 0x0c86, + 0xd52: 0x0e6e, 0xd53: 0x0eba, 0xd54: 0x0f6a, 0xd55: 0x108e, 0xd56: 0x1132, 0xd57: 0x1196, + 0xd58: 0x13de, 0xd59: 0x126e, 0xd5a: 0x1406, 0xd5b: 0x1482, 0xd5c: 0x0812, 0xd5d: 0x083e, + 0xd5e: 0x0926, 0xd5f: 0x0eaa, 0xd60: 0x12f6, 0xd61: 0x133e, 0xd62: 0x0b1e, 0xd63: 0x0b8e, + 0xd64: 0x0c52, 0xd65: 0x0db2, 0xd66: 0x10da, 0xd67: 0x0f26, 0xd68: 0x073e, 0xd69: 0x0982, + 0xd6a: 0x0a66, 0xd6b: 0x0aca, 0xd6c: 0x0b9a, 0xd6d: 0x0f42, 0xd6e: 0x0f5e, 0xd6f: 0x116e, + 0xd70: 0x118e, 0xd71: 0x1466, 0xd72: 0x14e6, 0xd73: 
0x14f6, 0xd74: 0x1532, 0xd75: 0x0756, + 0xd76: 0x1082, 0xd77: 0x1452, 0xd78: 0x14ce, 0xd79: 0x0bb2, 0xd7a: 0x071a, 0xd7b: 0x077a, + 0xd7c: 0x0a6a, 0xd7d: 0x0a8a, 0xd7e: 0x0cb2, 0xd7f: 0x0d76, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec6, 0xd81: 0x0fce, 0xd82: 0x127a, 0xd83: 0x141a, 0xd84: 0x1626, 0xd85: 0x0ce6, + 0xd86: 0x14a6, 0xd87: 0x0836, 0xd88: 0x0d32, 0xd89: 0x0d3e, 0xd8a: 0x0e12, 0xd8b: 0x0e4a, + 0xd8c: 0x0f4e, 0xd8d: 0x0faa, 0xd8e: 0x102a, 0xd8f: 0x110e, 0xd90: 0x153e, 0xd91: 0x07b2, + 0xd92: 0x0c06, 0xd93: 0x14b6, 0xd94: 0x076a, 0xd95: 0x0aae, 0xd96: 0x0e32, 0xd97: 0x13e2, + 0xd98: 0x0b6a, 0xd99: 0x0bba, 0xd9a: 0x0d46, 0xd9b: 0x0f32, 0xd9c: 0x14be, 0xd9d: 0x081a, + 0xd9e: 0x0902, 0xd9f: 0x0a9a, 0xda0: 0x0cd6, 0xda1: 0x0d22, 0xda2: 0x0d62, 0xda3: 0x0df6, + 0xda4: 0x0f4a, 0xda5: 0x0fbe, 0xda6: 0x115a, 0xda7: 0x12fa, 0xda8: 0x1306, 0xda9: 0x145a, + 0xdaa: 0x14da, 0xdab: 0x0886, 0xdac: 0x0e4e, 0xdad: 0x0906, 0xdae: 0x0eca, 0xdaf: 0x0f6e, + 0xdb0: 0x128a, 0xdb1: 0x14c2, 0xdb2: 0x15ae, 0xdb3: 0x15d6, 0xdb4: 0x0d3a, 0xdb5: 0x0e2a, + 0xdb6: 0x11c6, 0xdb7: 0x10ba, 0xdb8: 0x10c6, 0xdb9: 0x10ea, 0xdba: 0x0f1a, 0xdbb: 0x0ea2, + 0xdbc: 0x1366, 0xdbd: 0x0736, 0xdbe: 0x122e, 0xdbf: 0x081e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080e, 0xdc1: 0x0b0e, 0xdc2: 0x0c2e, 0xdc3: 0x10f6, 0xdc4: 0x0a56, 0xdc5: 0x0e06, + 0xdc6: 0x0cf2, 0xdc7: 0x13ea, 0xdc8: 0x12ea, 0xdc9: 0x14ae, 0xdca: 0x1326, 0xdcb: 0x0b2a, + 0xdcc: 0x078a, 0xdcd: 0x095e, 0xdd0: 0x09b2, + 0xdd2: 0x0ce2, 0xdd5: 0x07fa, 0xdd6: 0x0f22, 0xdd7: 0x0fe6, + 0xdd8: 0x104a, 0xdd9: 0x1066, 0xdda: 0x106a, 0xddb: 0x107e, 0xddc: 0x14fe, 0xddd: 0x10ee, + 0xdde: 0x1172, 0xde0: 0x1292, 0xde2: 0x1356, + 0xde5: 0x140a, 0xde6: 0x1436, + 0xdea: 0x1552, 0xdeb: 0x1556, 0xdec: 0x155a, 0xded: 0x15be, 0xdee: 0x142e, 0xdef: 0x14ca, + 0xdf0: 0x075a, 0xdf1: 0x077e, 0xdf2: 0x0792, 0xdf3: 0x084e, 0xdf4: 0x085a, 0xdf5: 0x089a, + 0xdf6: 0x094e, 0xdf7: 0x096a, 0xdf8: 0x0972, 0xdf9: 0x09ae, 0xdfa: 0x09ba, 0xdfb: 0x0a96, + 0xdfc: 0x0a9e, 0xdfd: 0x0ba6, 0xdfe: 0x0bce, 0xdff: 0x0bd6, + // Block 0x38, offset 0xe00 + 0xe00: 0x0bee, 0xe01: 0x0c9a, 0xe02: 0x0cca, 0xe03: 0x0cea, 0xe04: 0x0d5a, 0xe05: 0x0e1e, + 0xe06: 0x0e3a, 0xe07: 0x0e6a, 0xe08: 0x0ebe, 0xe09: 0x0ede, 0xe0a: 0x0f52, 0xe0b: 0x1032, + 0xe0c: 0x104e, 0xe0d: 0x1056, 0xe0e: 0x1052, 0xe0f: 0x105a, 0xe10: 0x105e, 0xe11: 0x1062, + 0xe12: 0x1076, 0xe13: 0x107a, 0xe14: 0x109e, 0xe15: 0x10b2, 0xe16: 0x10ce, 0xe17: 0x1132, + 0xe18: 0x113a, 0xe19: 0x1142, 0xe1a: 0x1156, 0xe1b: 0x117e, 0xe1c: 0x11ce, 0xe1d: 0x1202, + 0xe1e: 0x1202, 0xe1f: 0x126a, 0xe20: 0x1312, 0xe21: 0x132a, 0xe22: 0x135e, 0xe23: 0x1362, + 0xe24: 0x13a6, 0xe25: 0x13aa, 0xe26: 0x1402, 0xe27: 0x140a, 0xe28: 0x14de, 0xe29: 0x1522, + 0xe2a: 0x153a, 0xe2b: 0x0b9e, 0xe2c: 0x1721, 0xe2d: 0x11e6, + 0xe30: 0x06e2, 0xe31: 0x07e6, 0xe32: 0x07a6, 0xe33: 0x074e, 0xe34: 0x078e, 0xe35: 0x07ba, + 0xe36: 0x084a, 0xe37: 0x0866, 0xe38: 0x094e, 0xe39: 0x093a, 0xe3a: 0x094a, 0xe3b: 0x0966, + 0xe3c: 0x09b2, 0xe3d: 0x09c2, 0xe3e: 0x0a06, 0xe3f: 0x0a12, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2e, 0xe41: 0x0a3e, 0xe42: 0x0b26, 0xe43: 0x0b2e, 0xe44: 0x0b5e, 0xe45: 0x0b7e, + 0xe46: 0x0bae, 0xe47: 0x0bc6, 0xe48: 0x0bb6, 0xe49: 0x0bd6, 0xe4a: 0x0bca, 0xe4b: 0x0bee, + 0xe4c: 0x0c0a, 0xe4d: 0x0c62, 0xe4e: 0x0c6e, 0xe4f: 0x0c76, 0xe50: 0x0c9e, 0xe51: 0x0ce2, + 0xe52: 0x0d12, 0xe53: 0x0d16, 0xe54: 0x0d2a, 0xe55: 0x0daa, 0xe56: 0x0dba, 0xe57: 0x0e12, + 0xe58: 0x0e5e, 0xe59: 0x0e56, 0xe5a: 0x0e6a, 0xe5b: 0x0e86, 0xe5c: 0x0ebe, 0xe5d: 0x1016, + 0xe5e: 0x0ee2, 0xe5f: 0x0f16, 0xe60: 0x0f22, 
0xe61: 0x0f62, 0xe62: 0x0f7e, 0xe63: 0x0fa2, + 0xe64: 0x0fc6, 0xe65: 0x0fca, 0xe66: 0x0fe6, 0xe67: 0x0fea, 0xe68: 0x0ffa, 0xe69: 0x100e, + 0xe6a: 0x100a, 0xe6b: 0x103a, 0xe6c: 0x10b6, 0xe6d: 0x10ce, 0xe6e: 0x10e6, 0xe6f: 0x111e, + 0xe70: 0x1132, 0xe71: 0x114e, 0xe72: 0x117e, 0xe73: 0x1232, 0xe74: 0x125a, 0xe75: 0x12ce, + 0xe76: 0x1316, 0xe77: 0x1322, 0xe78: 0x132a, 0xe79: 0x1342, 0xe7a: 0x1356, 0xe7b: 0x1346, + 0xe7c: 0x135e, 0xe7d: 0x135a, 0xe7e: 0x1352, 0xe7f: 0x1362, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136e, 0xe81: 0x13aa, 0xe82: 0x13e6, 0xe83: 0x1416, 0xe84: 0x144e, 0xe85: 0x146e, + 0xe86: 0x14ba, 0xe87: 0x14de, 0xe88: 0x14fe, 0xe89: 0x1512, 0xe8a: 0x1522, 0xe8b: 0x152e, + 0xe8c: 0x153a, 0xe8d: 0x158e, 0xe8e: 0x162e, 0xe8f: 0x16b8, 0xe90: 0x16b3, 0xe91: 0x16e5, + 0xe92: 0x060a, 0xe93: 0x0632, 0xe94: 0x0636, 0xe95: 0x1767, 0xe96: 0x1794, 0xe97: 0x180c, + 0xe98: 0x161a, 0xe99: 0x162a, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19db, 0xec1: 0x19de, 0xec2: 0x19e1, 0xec3: 0x1c0e, 0xec4: 0x1c12, 0xec5: 0x1a65, + 0xec6: 0x1a65, + 0xed3: 0x1d7b, 0xed4: 0x1d6c, 0xed5: 0x1d71, 0xed6: 0x1d80, 0xed7: 0x1d76, + 0xedd: 0x43a7, + 0xede: 0x8116, 0xedf: 0x4419, 0xee0: 0x0230, 0xee1: 0x0218, 0xee2: 0x0221, 0xee3: 0x0224, + 0xee4: 0x0227, 0xee5: 0x022a, 0xee6: 0x022d, 0xee7: 0x0233, 0xee8: 0x0236, 0xee9: 0x0017, + 0xeea: 0x4407, 0xeeb: 0x440d, 0xeec: 0x450b, 0xeed: 0x4513, 0xeee: 0x435f, 0xeef: 0x4365, + 0xef0: 0x436b, 0xef1: 0x4371, 0xef2: 0x437d, 0xef3: 0x4383, 0xef4: 0x4389, 0xef5: 0x4395, + 0xef6: 0x439b, 0xef8: 0x43a1, 0xef9: 0x43ad, 0xefa: 0x43b3, 0xefb: 0x43b9, + 0xefc: 0x43c5, 0xefe: 0x43cb, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43d1, 0xf01: 0x43d7, 0xf03: 0x43dd, 0xf04: 0x43e3, + 0xf06: 0x43ef, 0xf07: 0x43f5, 0xf08: 0x43fb, 0xf09: 0x4401, 0xf0a: 0x4413, 0xf0b: 0x438f, + 0xf0c: 0x4377, 0xf0d: 0x43bf, 0xf0e: 0x43e9, 0xf0f: 0x1d85, 0xf10: 0x029c, 0xf11: 0x029c, + 0xf12: 0x02a5, 0xf13: 0x02a5, 0xf14: 0x02a5, 0xf15: 0x02a5, 0xf16: 0x02a8, 0xf17: 0x02a8, + 0xf18: 0x02a8, 0xf19: 0x02a8, 0xf1a: 0x02ae, 0xf1b: 0x02ae, 0xf1c: 0x02ae, 0xf1d: 0x02ae, + 0xf1e: 0x02a2, 0xf1f: 0x02a2, 0xf20: 0x02a2, 0xf21: 0x02a2, 0xf22: 0x02ab, 0xf23: 0x02ab, + 0xf24: 0x02ab, 0xf25: 0x02ab, 0xf26: 0x029f, 0xf27: 0x029f, 0xf28: 0x029f, 0xf29: 0x029f, + 0xf2a: 0x02d2, 0xf2b: 0x02d2, 0xf2c: 0x02d2, 0xf2d: 0x02d2, 0xf2e: 0x02d5, 0xf2f: 0x02d5, + 0xf30: 0x02d5, 0xf31: 0x02d5, 0xf32: 0x02b4, 0xf33: 0x02b4, 0xf34: 0x02b4, 0xf35: 0x02b4, + 0xf36: 0x02b1, 0xf37: 0x02b1, 0xf38: 0x02b1, 0xf39: 0x02b1, 0xf3a: 0x02b7, 0xf3b: 0x02b7, + 0xf3c: 0x02b7, 0xf3d: 0x02b7, 0xf3e: 0x02ba, 0xf3f: 0x02ba, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02ba, 0xf41: 0x02ba, 0xf42: 0x02c3, 0xf43: 0x02c3, 0xf44: 0x02c0, 0xf45: 0x02c0, + 0xf46: 0x02c6, 0xf47: 0x02c6, 0xf48: 0x02bd, 0xf49: 0x02bd, 0xf4a: 0x02cc, 0xf4b: 0x02cc, + 0xf4c: 0x02c9, 0xf4d: 0x02c9, 0xf4e: 0x02d8, 0xf4f: 0x02d8, 0xf50: 0x02d8, 0xf51: 0x02d8, + 0xf52: 0x02de, 0xf53: 0x02de, 0xf54: 0x02de, 0xf55: 0x02de, 0xf56: 0x02e4, 0xf57: 0x02e4, + 0xf58: 0x02e4, 0xf59: 0x02e4, 0xf5a: 0x02e1, 0xf5b: 0x02e1, 0xf5c: 0x02e1, 0xf5d: 0x02e1, + 0xf5e: 0x02e7, 0xf5f: 0x02e7, 0xf60: 0x02ea, 0xf61: 0x02ea, 0xf62: 0x02ea, 0xf63: 0x02ea, + 0xf64: 0x4485, 0xf65: 0x4485, 0xf66: 0x02f0, 0xf67: 0x02f0, 0xf68: 0x02f0, 0xf69: 0x02f0, + 0xf6a: 0x02ed, 0xf6b: 0x02ed, 0xf6c: 0x02ed, 0xf6d: 0x02ed, 0xf6e: 0x030b, 0xf6f: 0x030b, + 0xf70: 0x447f, 0xf71: 0x447f, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02db, 0xf94: 0x02db, 0xf95: 0x02db, 0xf96: 0x02db, 0xf97: 0x02f9, + 0xf98: 0x02f9, 0xf99: 0x02f6, 0xf9a: 0x02f6, 
0xf9b: 0x02fc, 0xf9c: 0x02fc, 0xf9d: 0x2055, + 0xf9e: 0x0302, 0xf9f: 0x0302, 0xfa0: 0x02f3, 0xfa1: 0x02f3, 0xfa2: 0x02ff, 0xfa3: 0x02ff, + 0xfa4: 0x0308, 0xfa5: 0x0308, 0xfa6: 0x0308, 0xfa7: 0x0308, 0xfa8: 0x0290, 0xfa9: 0x0290, + 0xfaa: 0x25b0, 0xfab: 0x25b0, 0xfac: 0x2620, 0xfad: 0x2620, 0xfae: 0x25ef, 0xfaf: 0x25ef, + 0xfb0: 0x260b, 0xfb1: 0x260b, 0xfb2: 0x2604, 0xfb3: 0x2604, 0xfb4: 0x2612, 0xfb5: 0x2612, + 0xfb6: 0x2619, 0xfb7: 0x2619, 0xfb8: 0x2619, 0xfb9: 0x25f6, 0xfba: 0x25f6, 0xfbb: 0x25f6, + 0xfbc: 0x0305, 0xfbd: 0x0305, 0xfbe: 0x0305, 0xfbf: 0x0305, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b7, 0xfc1: 0x25be, 0xfc2: 0x25da, 0xfc3: 0x25f6, 0xfc4: 0x25fd, 0xfc5: 0x1d8f, + 0xfc6: 0x1d94, 0xfc7: 0x1d99, 0xfc8: 0x1da8, 0xfc9: 0x1db7, 0xfca: 0x1dbc, 0xfcb: 0x1dc1, + 0xfcc: 0x1dc6, 0xfcd: 0x1dcb, 0xfce: 0x1dda, 0xfcf: 0x1de9, 0xfd0: 0x1dee, 0xfd1: 0x1df3, + 0xfd2: 0x1e02, 0xfd3: 0x1e11, 0xfd4: 0x1e16, 0xfd5: 0x1e1b, 0xfd6: 0x1e20, 0xfd7: 0x1e2f, + 0xfd8: 0x1e34, 0xfd9: 0x1e43, 0xfda: 0x1e48, 0xfdb: 0x1e4d, 0xfdc: 0x1e5c, 0xfdd: 0x1e61, + 0xfde: 0x1e66, 0xfdf: 0x1e70, 0xfe0: 0x1eac, 0xfe1: 0x1ebb, 0xfe2: 0x1eca, 0xfe3: 0x1ecf, + 0xfe4: 0x1ed4, 0xfe5: 0x1ede, 0xfe6: 0x1eed, 0xfe7: 0x1ef2, 0xfe8: 0x1f01, 0xfe9: 0x1f06, + 0xfea: 0x1f0b, 0xfeb: 0x1f1a, 0xfec: 0x1f1f, 0xfed: 0x1f2e, 0xfee: 0x1f33, 0xfef: 0x1f38, + 0xff0: 0x1f3d, 0xff1: 0x1f42, 0xff2: 0x1f47, 0xff3: 0x1f4c, 0xff4: 0x1f51, 0xff5: 0x1f56, + 0xff6: 0x1f5b, 0xff7: 0x1f60, 0xff8: 0x1f65, 0xff9: 0x1f6a, 0xffa: 0x1f6f, 0xffb: 0x1f74, + 0xffc: 0x1f79, 0xffd: 0x1f7e, 0xffe: 0x1f83, 0xfff: 0x1f8d, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f92, 0x1001: 0x1f97, 0x1002: 0x1f9c, 0x1003: 0x1fa6, 0x1004: 0x1fab, 0x1005: 0x1fb5, + 0x1006: 0x1fba, 0x1007: 0x1fbf, 0x1008: 0x1fc4, 0x1009: 0x1fc9, 0x100a: 0x1fce, 0x100b: 0x1fd3, + 0x100c: 0x1fd8, 0x100d: 0x1fdd, 0x100e: 0x1fec, 0x100f: 0x1ffb, 0x1010: 0x2000, 0x1011: 0x2005, + 0x1012: 0x200a, 0x1013: 0x200f, 0x1014: 0x2014, 0x1015: 0x201e, 0x1016: 0x2023, 0x1017: 0x2028, + 0x1018: 0x2037, 0x1019: 0x2046, 0x101a: 0x204b, 0x101b: 0x4437, 0x101c: 0x443d, 0x101d: 0x4473, + 0x101e: 0x44ca, 0x101f: 0x44d1, 0x1020: 0x44d8, 0x1021: 0x44df, 0x1022: 0x44e6, 0x1023: 0x44ed, + 0x1024: 0x25cc, 0x1025: 0x25d3, 0x1026: 0x25da, 0x1027: 0x25e1, 0x1028: 0x25f6, 0x1029: 0x25fd, + 0x102a: 0x1d9e, 0x102b: 0x1da3, 0x102c: 0x1da8, 0x102d: 0x1dad, 0x102e: 0x1db7, 0x102f: 0x1dbc, + 0x1030: 0x1dd0, 0x1031: 0x1dd5, 0x1032: 0x1dda, 0x1033: 0x1ddf, 0x1034: 0x1de9, 0x1035: 0x1dee, + 0x1036: 0x1df8, 0x1037: 0x1dfd, 0x1038: 0x1e02, 0x1039: 0x1e07, 0x103a: 0x1e11, 0x103b: 0x1e16, + 0x103c: 0x1f42, 0x103d: 0x1f47, 0x103e: 0x1f56, 0x103f: 0x1f5b, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f60, 0x1041: 0x1f74, 0x1042: 0x1f79, 0x1043: 0x1f7e, 0x1044: 0x1f83, 0x1045: 0x1f9c, + 0x1046: 0x1fa6, 0x1047: 0x1fab, 0x1048: 0x1fb0, 0x1049: 0x1fc4, 0x104a: 0x1fe2, 0x104b: 0x1fe7, + 0x104c: 0x1fec, 0x104d: 0x1ff1, 0x104e: 0x1ffb, 0x104f: 0x2000, 0x1050: 0x4473, 0x1051: 0x202d, + 0x1052: 0x2032, 0x1053: 0x2037, 0x1054: 0x203c, 0x1055: 0x2046, 0x1056: 0x204b, 0x1057: 0x25b7, + 0x1058: 0x25be, 0x1059: 0x25c5, 0x105a: 0x25da, 0x105b: 0x25e8, 0x105c: 0x1d8f, 0x105d: 0x1d94, + 0x105e: 0x1d99, 0x105f: 0x1da8, 0x1060: 0x1db2, 0x1061: 0x1dc1, 0x1062: 0x1dc6, 0x1063: 0x1dcb, + 0x1064: 0x1dda, 0x1065: 0x1de4, 0x1066: 0x1e02, 0x1067: 0x1e1b, 0x1068: 0x1e20, 0x1069: 0x1e2f, + 0x106a: 0x1e34, 0x106b: 0x1e43, 0x106c: 0x1e4d, 0x106d: 0x1e5c, 0x106e: 0x1e61, 0x106f: 0x1e66, + 0x1070: 0x1e70, 0x1071: 0x1eac, 0x1072: 0x1eb1, 0x1073: 0x1ebb, 0x1074: 0x1eca, 
0x1075: 0x1ecf, + 0x1076: 0x1ed4, 0x1077: 0x1ede, 0x1078: 0x1eed, 0x1079: 0x1f01, 0x107a: 0x1f06, 0x107b: 0x1f0b, + 0x107c: 0x1f1a, 0x107d: 0x1f1f, 0x107e: 0x1f2e, 0x107f: 0x1f33, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f38, 0x1081: 0x1f3d, 0x1082: 0x1f4c, 0x1083: 0x1f51, 0x1084: 0x1f65, 0x1085: 0x1f6a, + 0x1086: 0x1f6f, 0x1087: 0x1f74, 0x1088: 0x1f79, 0x1089: 0x1f8d, 0x108a: 0x1f92, 0x108b: 0x1f97, + 0x108c: 0x1f9c, 0x108d: 0x1fa1, 0x108e: 0x1fb5, 0x108f: 0x1fba, 0x1090: 0x1fbf, 0x1091: 0x1fc4, + 0x1092: 0x1fd3, 0x1093: 0x1fd8, 0x1094: 0x1fdd, 0x1095: 0x1fec, 0x1096: 0x1ff6, 0x1097: 0x2005, + 0x1098: 0x200a, 0x1099: 0x4467, 0x109a: 0x201e, 0x109b: 0x2023, 0x109c: 0x2028, 0x109d: 0x2037, + 0x109e: 0x2041, 0x109f: 0x25da, 0x10a0: 0x25e8, 0x10a1: 0x1da8, 0x10a2: 0x1db2, 0x10a3: 0x1dda, + 0x10a4: 0x1de4, 0x10a5: 0x1e02, 0x10a6: 0x1e0c, 0x10a7: 0x1e70, 0x10a8: 0x1e75, 0x10a9: 0x1e98, + 0x10aa: 0x1e9d, 0x10ab: 0x1f74, 0x10ac: 0x1f79, 0x10ad: 0x1f9c, 0x10ae: 0x1fec, 0x10af: 0x1ff6, + 0x10b0: 0x2037, 0x10b1: 0x2041, 0x10b2: 0x451b, 0x10b3: 0x4523, 0x10b4: 0x452b, 0x10b5: 0x1ef7, + 0x10b6: 0x1efc, 0x10b7: 0x1f10, 0x10b8: 0x1f15, 0x10b9: 0x1f24, 0x10ba: 0x1f29, 0x10bb: 0x1e7a, + 0x10bc: 0x1e7f, 0x10bd: 0x1ea2, 0x10be: 0x1ea7, 0x10bf: 0x1e39, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e3e, 0x10c1: 0x1e25, 0x10c2: 0x1e2a, 0x10c3: 0x1e52, 0x10c4: 0x1e57, 0x10c5: 0x1ec0, + 0x10c6: 0x1ec5, 0x10c7: 0x1ee3, 0x10c8: 0x1ee8, 0x10c9: 0x1e84, 0x10ca: 0x1e89, 0x10cb: 0x1e8e, + 0x10cc: 0x1e98, 0x10cd: 0x1e93, 0x10ce: 0x1e6b, 0x10cf: 0x1eb6, 0x10d0: 0x1ed9, 0x10d1: 0x1ef7, + 0x10d2: 0x1efc, 0x10d3: 0x1f10, 0x10d4: 0x1f15, 0x10d5: 0x1f24, 0x10d6: 0x1f29, 0x10d7: 0x1e7a, + 0x10d8: 0x1e7f, 0x10d9: 0x1ea2, 0x10da: 0x1ea7, 0x10db: 0x1e39, 0x10dc: 0x1e3e, 0x10dd: 0x1e25, + 0x10de: 0x1e2a, 0x10df: 0x1e52, 0x10e0: 0x1e57, 0x10e1: 0x1ec0, 0x10e2: 0x1ec5, 0x10e3: 0x1ee3, + 0x10e4: 0x1ee8, 0x10e5: 0x1e84, 0x10e6: 0x1e89, 0x10e7: 0x1e8e, 0x10e8: 0x1e98, 0x10e9: 0x1e93, + 0x10ea: 0x1e6b, 0x10eb: 0x1eb6, 0x10ec: 0x1ed9, 0x10ed: 0x1e84, 0x10ee: 0x1e89, 0x10ef: 0x1e8e, + 0x10f0: 0x1e98, 0x10f1: 0x1e75, 0x10f2: 0x1e9d, 0x10f3: 0x1ef2, 0x10f4: 0x1e5c, 0x10f5: 0x1e61, + 0x10f6: 0x1e66, 0x10f7: 0x1e84, 0x10f8: 0x1e89, 0x10f9: 0x1e8e, 0x10fa: 0x1ef2, 0x10fb: 0x1f01, + 0x10fc: 0x441f, 0x10fd: 0x441f, + // Block 0x44, offset 0x1100 + 0x1110: 0x2317, 0x1111: 0x232c, + 0x1112: 0x232c, 0x1113: 0x2333, 0x1114: 0x233a, 0x1115: 0x234f, 0x1116: 0x2356, 0x1117: 0x235d, + 0x1118: 0x2380, 0x1119: 0x2380, 0x111a: 0x23a3, 0x111b: 0x239c, 0x111c: 0x23b8, 0x111d: 0x23aa, + 0x111e: 0x23b1, 0x111f: 0x23d4, 0x1120: 0x23d4, 0x1121: 0x23cd, 0x1122: 0x23db, 0x1123: 0x23db, + 0x1124: 0x2405, 0x1125: 0x2405, 0x1126: 0x2421, 0x1127: 0x23e9, 0x1128: 0x23e9, 0x1129: 0x23e2, + 0x112a: 0x23f7, 0x112b: 0x23f7, 0x112c: 0x23fe, 0x112d: 0x23fe, 0x112e: 0x2428, 0x112f: 0x2436, + 0x1130: 0x2436, 0x1131: 0x243d, 0x1132: 0x243d, 0x1133: 0x2444, 0x1134: 0x244b, 0x1135: 0x2452, + 0x1136: 0x2459, 0x1137: 0x2459, 0x1138: 0x2460, 0x1139: 0x246e, 0x113a: 0x247c, 0x113b: 0x2475, + 0x113c: 0x2483, 0x113d: 0x2483, 0x113e: 0x2498, 0x113f: 0x249f, + // Block 0x45, offset 0x1140 + 0x1140: 0x24d0, 0x1141: 0x24de, 0x1142: 0x24d7, 0x1143: 0x24bb, 0x1144: 0x24bb, 0x1145: 0x24e5, + 0x1146: 0x24e5, 0x1147: 0x24ec, 0x1148: 0x24ec, 0x1149: 0x2516, 0x114a: 0x251d, 0x114b: 0x2524, + 0x114c: 0x24fa, 0x114d: 0x2508, 0x114e: 0x252b, 0x114f: 0x2532, + 0x1152: 0x2501, 0x1153: 0x2586, 0x1154: 0x258d, 0x1155: 0x2563, 0x1156: 0x256a, 0x1157: 0x254e, + 0x1158: 0x254e, 0x1159: 0x2555, 0x115a: 
0x257f, 0x115b: 0x2578, 0x115c: 0x25a2, 0x115d: 0x25a2, + 0x115e: 0x2310, 0x115f: 0x2325, 0x1160: 0x231e, 0x1161: 0x2348, 0x1162: 0x2341, 0x1163: 0x236b, + 0x1164: 0x2364, 0x1165: 0x238e, 0x1166: 0x2372, 0x1167: 0x2387, 0x1168: 0x23bf, 0x1169: 0x240c, + 0x116a: 0x23f0, 0x116b: 0x242f, 0x116c: 0x24c9, 0x116d: 0x24f3, 0x116e: 0x259b, 0x116f: 0x2594, + 0x1170: 0x25a9, 0x1171: 0x2540, 0x1172: 0x24a6, 0x1173: 0x2571, 0x1174: 0x2498, 0x1175: 0x24d0, + 0x1176: 0x2467, 0x1177: 0x24b4, 0x1178: 0x2547, 0x1179: 0x2539, 0x117a: 0x24c2, 0x117b: 0x24ad, + 0x117c: 0x24c2, 0x117d: 0x2547, 0x117e: 0x2379, 0x117f: 0x2395, + // Block 0x46, offset 0x1180 + 0x1180: 0x250f, 0x1181: 0x248a, 0x1182: 0x2309, 0x1183: 0x24ad, 0x1184: 0x2452, 0x1185: 0x2421, + 0x1186: 0x23c6, 0x1187: 0x255c, + 0x11b0: 0x241a, 0x11b1: 0x2491, 0x11b2: 0x27cc, 0x11b3: 0x27c3, 0x11b4: 0x27f9, 0x11b5: 0x27e7, + 0x11b6: 0x27d5, 0x11b7: 0x27f0, 0x11b8: 0x2802, 0x11b9: 0x2413, 0x11ba: 0x2c89, 0x11bb: 0x2b09, + 0x11bc: 0x27de, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0486, + 0x11d2: 0x048a, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04c2, + 0x11d8: 0x04c6, 0x11d9: 0x1b62, + 0x11e0: 0x8133, 0x11e1: 0x8133, 0x11e2: 0x8133, 0x11e3: 0x8133, + 0x11e4: 0x8133, 0x11e5: 0x8133, 0x11e6: 0x8133, 0x11e7: 0x812e, 0x11e8: 0x812e, 0x11e9: 0x812e, + 0x11ea: 0x812e, 0x11eb: 0x812e, 0x11ec: 0x812e, 0x11ed: 0x812e, 0x11ee: 0x8133, 0x11ef: 0x8133, + 0x11f0: 0x1876, 0x11f1: 0x0446, 0x11f2: 0x0442, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04ba, 0x11fa: 0x04be, 0x11fb: 0x04ae, + 0x11fc: 0x04b2, 0x11fd: 0x0496, 0x11fe: 0x049a, 0x11ff: 0x048e, + // Block 0x48, offset 0x1200 + 0x1200: 0x0492, 0x1201: 0x049e, 0x1202: 0x04a2, 0x1203: 0x04a6, 0x1204: 0x04aa, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4280, 0x120a: 0x4280, 0x120b: 0x4280, + 0x120c: 0x4280, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0486, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0446, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04ba, + 0x121e: 0x04be, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42c1, 0x1231: 0x4443, 0x1232: 0x42c6, 0x1234: 0x42cb, + 0x1236: 0x42d0, 0x1237: 0x4449, 0x1238: 0x42d5, 0x1239: 0x444f, 0x123a: 0x42da, 0x123b: 0x4455, + 0x123c: 0x42df, 0x123d: 0x445b, 0x123e: 0x42e4, 0x123f: 0x4461, + // Block 0x49, offset 0x1240 + 0x1240: 0x0239, 0x1241: 0x4425, 0x1242: 0x4425, 0x1243: 0x442b, 0x1244: 0x442b, 0x1245: 0x446d, + 0x1246: 0x446d, 0x1247: 0x4431, 0x1248: 0x4431, 0x1249: 0x4479, 0x124a: 0x4479, 0x124b: 0x4479, + 0x124c: 0x4479, 0x124d: 0x023c, 0x124e: 0x023c, 0x124f: 0x023f, 0x1250: 0x023f, 0x1251: 0x023f, + 0x1252: 0x023f, 0x1253: 0x0242, 0x1254: 0x0242, 0x1255: 0x0245, 0x1256: 0x0245, 0x1257: 0x0245, + 0x1258: 0x0245, 0x1259: 0x0248, 0x125a: 0x0248, 0x125b: 0x0248, 0x125c: 0x0248, 0x125d: 0x024b, + 0x125e: 0x024b, 0x125f: 0x024b, 0x1260: 0x024b, 0x1261: 0x024e, 0x1262: 0x024e, 0x1263: 0x024e, + 0x1264: 0x024e, 0x1265: 0x0251, 0x1266: 0x0251, 0x1267: 0x0251, 0x1268: 0x0251, 0x1269: 0x0254, + 0x126a: 0x0254, 0x126b: 0x0257, 0x126c: 0x0257, 0x126d: 0x025a, 0x126e: 0x025a, 0x126f: 0x025d, + 0x1270: 0x025d, 0x1271: 0x0260, 0x1272: 0x0260, 0x1273: 0x0260, 0x1274: 0x0260, 0x1275: 0x0263, + 0x1276: 
0x0263, 0x1277: 0x0263, 0x1278: 0x0263, 0x1279: 0x0266, 0x127a: 0x0266, 0x127b: 0x0266, + 0x127c: 0x0266, 0x127d: 0x0269, 0x127e: 0x0269, 0x127f: 0x0269, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0269, 0x1281: 0x026c, 0x1282: 0x026c, 0x1283: 0x026c, 0x1284: 0x026c, 0x1285: 0x026f, + 0x1286: 0x026f, 0x1287: 0x026f, 0x1288: 0x026f, 0x1289: 0x0272, 0x128a: 0x0272, 0x128b: 0x0272, + 0x128c: 0x0272, 0x128d: 0x0275, 0x128e: 0x0275, 0x128f: 0x0275, 0x1290: 0x0275, 0x1291: 0x0278, + 0x1292: 0x0278, 0x1293: 0x0278, 0x1294: 0x0278, 0x1295: 0x027b, 0x1296: 0x027b, 0x1297: 0x027b, + 0x1298: 0x027b, 0x1299: 0x027e, 0x129a: 0x027e, 0x129b: 0x027e, 0x129c: 0x027e, 0x129d: 0x0281, + 0x129e: 0x0281, 0x129f: 0x0281, 0x12a0: 0x0281, 0x12a1: 0x0284, 0x12a2: 0x0284, 0x12a3: 0x0284, + 0x12a4: 0x0284, 0x12a5: 0x0287, 0x12a6: 0x0287, 0x12a7: 0x0287, 0x12a8: 0x0287, 0x12a9: 0x028a, + 0x12aa: 0x028a, 0x12ab: 0x028a, 0x12ac: 0x028a, 0x12ad: 0x028d, 0x12ae: 0x028d, 0x12af: 0x0290, + 0x12b0: 0x0290, 0x12b1: 0x0293, 0x12b2: 0x0293, 0x12b3: 0x0293, 0x12b4: 0x0293, 0x12b5: 0x2e17, + 0x12b6: 0x2e17, 0x12b7: 0x2e1f, 0x12b8: 0x2e1f, 0x12b9: 0x2e27, 0x12ba: 0x2e27, 0x12bb: 0x1f88, + 0x12bc: 0x1f88, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x047a, 0x12e0: 0x047e, 0x12e1: 0x048a, 0x12e2: 0x049e, 0x12e3: 0x04a2, + 0x12e4: 0x0486, 0x12e5: 0x05ae, 0x12e6: 0x05a6, 0x12e7: 0x04ca, 0x12e8: 0x04d2, 0x12e9: 0x04da, + 0x12ea: 0x04e2, 0x12eb: 0x04ea, 0x12ec: 0x056e, 0x12ed: 0x0576, 0x12ee: 0x057e, 0x12ef: 0x0522, + 0x12f0: 0x05b2, 0x12f1: 0x04ce, 0x12f2: 0x04d6, 0x12f3: 0x04de, 0x12f4: 0x04e6, 0x12f5: 0x04ee, + 0x12f6: 0x04f2, 0x12f7: 0x04f6, 0x12f8: 0x04fa, 0x12f9: 0x04fe, 0x12fa: 0x0502, 0x12fb: 0x0506, + 0x12fc: 0x050a, 0x12fd: 0x050e, 0x12fe: 0x0512, 0x12ff: 0x0516, + // Block 0x4c, offset 0x1300 + 0x1300: 0x051a, 0x1301: 0x051e, 0x1302: 0x0526, 0x1303: 0x052a, 0x1304: 0x052e, 0x1305: 0x0532, + 0x1306: 0x0536, 0x1307: 0x053a, 0x1308: 0x053e, 0x1309: 0x0542, 0x130a: 0x0546, 0x130b: 0x054a, + 0x130c: 0x054e, 0x130d: 0x0552, 0x130e: 0x0556, 0x130f: 0x055a, 0x1310: 0x055e, 0x1311: 0x0562, + 0x1312: 0x0566, 0x1313: 0x056a, 0x1314: 0x0572, 0x1315: 0x057a, 0x1316: 0x0582, 0x1317: 0x0586, + 0x1318: 0x058a, 0x1319: 0x058e, 0x131a: 0x0592, 0x131b: 0x0596, 0x131c: 0x059a, 0x131d: 0x05aa, + 0x131e: 0x4a8f, 0x131f: 0x4a95, 0x1320: 0x03c6, 0x1321: 0x0316, 0x1322: 0x031a, 0x1323: 0x4a52, + 0x1324: 0x031e, 0x1325: 0x4a58, 0x1326: 0x4a5e, 0x1327: 0x0322, 0x1328: 0x0326, 0x1329: 0x032a, + 0x132a: 0x4a64, 0x132b: 0x4a6a, 0x132c: 0x4a70, 0x132d: 0x4a76, 0x132e: 0x4a7c, 0x132f: 0x4a82, + 0x1330: 0x036a, 0x1331: 0x032e, 0x1332: 0x0332, 0x1333: 0x0336, 0x1334: 0x037e, 0x1335: 0x033a, + 0x1336: 0x033e, 0x1337: 0x0342, 0x1338: 0x0346, 0x1339: 0x034a, 0x133a: 0x034e, 0x133b: 0x0352, + 0x133c: 0x0356, 0x133d: 0x035a, 0x133e: 0x035e, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49d4, 0x1343: 0x49da, 0x1344: 0x49e0, 0x1345: 0x49e6, + 0x1346: 0x49ec, 0x1347: 0x49f2, 0x134a: 0x49f8, 0x134b: 0x49fe, + 0x134c: 0x4a04, 0x134d: 0x4a0a, 0x134e: 0x4a10, 0x134f: 0x4a16, + 
0x1352: 0x4a1c, 0x1353: 0x4a22, 0x1354: 0x4a28, 0x1355: 0x4a2e, 0x1356: 0x4a34, 0x1357: 0x4a3a, + 0x135a: 0x4a40, 0x135b: 0x4a46, 0x135c: 0x4a4c, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x427b, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x044a, 0x1368: 0x046e, 0x1369: 0x044e, + 0x136a: 0x0452, 0x136b: 0x0456, 0x136c: 0x045a, 0x136d: 0x0472, 0x136e: 0x0476, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0176, 0x13a9: 0x0179, + 0x13aa: 0x017c, 0x13ab: 0x017f, 0x13ac: 0x0182, 0x13ad: 0x0185, 0x13ae: 0x0188, 0x13af: 0x018b, + 0x13b0: 0x018e, 0x13b1: 0x0191, 0x13b2: 0x0194, 0x13b3: 0x0197, 0x13b4: 0x019a, 0x13b5: 0x019d, + 0x13b6: 0x01a0, 0x13b7: 0x01a3, 0x13b8: 0x01a6, 0x13b9: 0x018b, 0x13ba: 0x01a9, 0x13bb: 0x01ac, + 0x13bc: 0x01af, 0x13bd: 0x01b2, 0x13be: 0x01b5, 0x13bf: 0x01b8, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0200, 0x13c1: 0x0203, 0x13c2: 0x0206, 0x13c3: 0x045e, 0x13c4: 0x01ca, 0x13c5: 0x01d3, + 0x13c6: 0x01d9, 0x13c7: 0x01fd, 0x13c8: 0x01ee, 0x13c9: 0x01eb, 0x13ca: 0x0209, 0x13cb: 0x020c, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x023c, 0x1401: 0x023f, 0x1402: 0x024b, 0x1403: 0x0254, 0x1405: 0x028d, + 0x1406: 0x025d, 0x1407: 0x024e, 0x1408: 0x026c, 0x1409: 0x0293, 0x140a: 0x027e, 0x140b: 0x0281, + 0x140c: 0x0284, 0x140d: 0x0287, 0x140e: 0x0260, 0x140f: 0x0272, 0x1410: 0x0278, 0x1411: 0x0266, + 0x1412: 0x027b, 0x1413: 0x025a, 0x1414: 0x0263, 0x1415: 0x0245, 0x1416: 0x0248, 0x1417: 0x0251, + 0x1418: 0x0257, 0x1419: 0x0269, 0x141a: 0x026f, 0x141b: 0x0275, 0x141c: 0x0296, 0x141d: 0x02e7, + 0x141e: 0x02cf, 0x141f: 0x0299, 0x1421: 0x023f, 0x1422: 0x024b, + 0x1424: 0x028a, 0x1427: 0x024e, 0x1429: 0x0293, + 0x142a: 0x027e, 0x142b: 0x0281, 0x142c: 0x0284, 0x142d: 0x0287, 0x142e: 0x0260, 0x142f: 0x0272, + 0x1430: 0x0278, 0x1431: 0x0266, 0x1432: 0x027b, 0x1434: 0x0263, 0x1435: 0x0245, + 0x1436: 0x0248, 0x1437: 0x0251, 0x1439: 0x0269, 0x143b: 0x0275, + // Block 0x51, offset 0x1440 + 0x1442: 0x024b, + 0x1447: 0x024e, 0x1449: 0x0293, 0x144b: 0x0281, + 0x144d: 0x0287, 0x144e: 0x0260, 0x144f: 0x0272, 0x1451: 0x0266, + 0x1452: 0x027b, 0x1454: 0x0263, 0x1457: 0x0251, + 
0x1459: 0x0269, 0x145b: 0x0275, 0x145d: 0x02e7, + 0x145f: 0x0299, 0x1461: 0x023f, 0x1462: 0x024b, + 0x1464: 0x028a, 0x1467: 0x024e, 0x1468: 0x026c, 0x1469: 0x0293, + 0x146a: 0x027e, 0x146c: 0x0284, 0x146d: 0x0287, 0x146e: 0x0260, 0x146f: 0x0272, + 0x1470: 0x0278, 0x1471: 0x0266, 0x1472: 0x027b, 0x1474: 0x0263, 0x1475: 0x0245, + 0x1476: 0x0248, 0x1477: 0x0251, 0x1479: 0x0269, 0x147a: 0x026f, 0x147b: 0x0275, + 0x147c: 0x0296, 0x147e: 0x02cf, + // Block 0x52, offset 0x1480 + 0x1480: 0x023c, 0x1481: 0x023f, 0x1482: 0x024b, 0x1483: 0x0254, 0x1484: 0x028a, 0x1485: 0x028d, + 0x1486: 0x025d, 0x1487: 0x024e, 0x1488: 0x026c, 0x1489: 0x0293, 0x148b: 0x0281, + 0x148c: 0x0284, 0x148d: 0x0287, 0x148e: 0x0260, 0x148f: 0x0272, 0x1490: 0x0278, 0x1491: 0x0266, + 0x1492: 0x027b, 0x1493: 0x025a, 0x1494: 0x0263, 0x1495: 0x0245, 0x1496: 0x0248, 0x1497: 0x0251, + 0x1498: 0x0257, 0x1499: 0x0269, 0x149a: 0x026f, 0x149b: 0x0275, + 0x14a1: 0x023f, 0x14a2: 0x024b, 0x14a3: 0x0254, + 0x14a5: 0x028d, 0x14a6: 0x025d, 0x14a7: 0x024e, 0x14a8: 0x026c, 0x14a9: 0x0293, + 0x14ab: 0x0281, 0x14ac: 0x0284, 0x14ad: 0x0287, 0x14ae: 0x0260, 0x14af: 0x0272, + 0x14b0: 0x0278, 0x14b1: 0x0266, 0x14b2: 0x027b, 0x14b3: 0x025a, 0x14b4: 0x0263, 0x14b5: 0x0245, + 0x14b6: 0x0248, 0x14b7: 0x0251, 0x14b8: 0x0257, 0x14b9: 0x0269, 0x14ba: 0x026f, 0x14bb: 0x0275, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x187c, 0x14c1: 0x1879, 0x14c2: 0x187f, 0x14c3: 0x18a3, 0x14c4: 0x18c7, 0x14c5: 0x18eb, + 0x14c6: 0x190f, 0x14c7: 0x1918, 0x14c8: 0x191e, 0x14c9: 0x1924, 0x14ca: 0x192a, + 0x14d0: 0x1a92, 0x14d1: 0x1a96, + 0x14d2: 0x1a9a, 0x14d3: 0x1a9e, 0x14d4: 0x1aa2, 0x14d5: 0x1aa6, 0x14d6: 0x1aaa, 0x14d7: 0x1aae, + 0x14d8: 0x1ab2, 0x14d9: 0x1ab6, 0x14da: 0x1aba, 0x14db: 0x1abe, 0x14dc: 0x1ac2, 0x14dd: 0x1ac6, + 0x14de: 0x1aca, 0x14df: 0x1ace, 0x14e0: 0x1ad2, 0x14e1: 0x1ad6, 0x14e2: 0x1ada, 0x14e3: 0x1ade, + 0x14e4: 0x1ae2, 0x14e5: 0x1ae6, 0x14e6: 0x1aea, 0x14e7: 0x1aee, 0x14e8: 0x1af2, 0x14e9: 0x1af6, + 0x14ea: 0x272b, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193f, 0x14ee: 0x19b7, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26b3, 0x1501: 0x26c8, 0x1502: 0x0506, + 0x1510: 0x0c12, 0x1511: 0x0a4a, + 0x1512: 0x08d6, 0x1513: 0x45db, 0x1514: 0x071e, 0x1515: 0x09f2, 0x1516: 0x1332, 0x1517: 0x0a02, + 0x1518: 0x072a, 0x1519: 0x0cda, 0x151a: 0x0eb2, 0x151b: 0x0cb2, 0x151c: 0x082a, 0x151d: 0x0b6e, + 0x151e: 0x07c2, 0x151f: 0x0cba, 0x1520: 0x0816, 0x1521: 0x111a, 0x1522: 0x0f86, 0x1523: 0x138e, + 0x1524: 0x09d6, 0x1525: 0x090e, 0x1526: 0x0e66, 0x1527: 0x0c1e, 0x1528: 0x0c4a, 0x1529: 0x06c2, + 0x152a: 0x06ce, 0x152b: 0x140e, 0x152c: 0x0ade, 0x152d: 0x06ea, 0x152e: 0x08f2, 0x152f: 0x0c3e, + 0x1530: 0x13b6, 0x1531: 0x0c16, 0x1532: 0x1072, 0x1533: 0x10ae, 0x1534: 0x08fa, 0x1535: 0x0e46, + 0x1536: 0x0d0e, 0x1537: 0x0d0a, 0x1538: 0x0f9a, 0x1539: 0x082e, 0x153a: 0x095a, 0x153b: 0x1446, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fe, 0x1541: 0x06f6, 0x1542: 0x0706, 0x1543: 0x164a, 0x1544: 0x074a, 0x1545: 0x075a, + 0x1546: 0x075e, 0x1547: 0x0766, 0x1548: 0x076e, 0x1549: 0x0772, 0x154a: 0x077e, 0x154b: 0x0776, + 0x154c: 0x05b6, 0x154d: 0x165e, 0x154e: 0x0792, 0x154f: 0x0796, 0x1550: 0x079a, 0x1551: 0x07b6, + 0x1552: 0x164f, 0x1553: 0x05ba, 0x1554: 0x07a2, 0x1555: 0x07c2, 0x1556: 0x1659, 0x1557: 0x07d2, + 0x1558: 0x07da, 
0x1559: 0x073a, 0x155a: 0x07e2, 0x155b: 0x07e6, 0x155c: 0x1834, 0x155d: 0x0802, + 0x155e: 0x080a, 0x155f: 0x05c2, 0x1560: 0x0822, 0x1561: 0x0826, 0x1562: 0x082e, 0x1563: 0x0832, + 0x1564: 0x05c6, 0x1565: 0x084a, 0x1566: 0x084e, 0x1567: 0x085a, 0x1568: 0x0866, 0x1569: 0x086a, + 0x156a: 0x086e, 0x156b: 0x0876, 0x156c: 0x0896, 0x156d: 0x089a, 0x156e: 0x08a2, 0x156f: 0x08b2, + 0x1570: 0x08ba, 0x1571: 0x08be, 0x1572: 0x08be, 0x1573: 0x08be, 0x1574: 0x166d, 0x1575: 0x0e96, + 0x1576: 0x08d2, 0x1577: 0x08da, 0x1578: 0x1672, 0x1579: 0x08e6, 0x157a: 0x08ee, 0x157b: 0x08f6, + 0x157c: 0x091e, 0x157d: 0x090a, 0x157e: 0x0916, 0x157f: 0x091a, + // Block 0x56, offset 0x1580 + 0x1580: 0x0922, 0x1581: 0x092a, 0x1582: 0x092e, 0x1583: 0x0936, 0x1584: 0x093e, 0x1585: 0x0942, + 0x1586: 0x0942, 0x1587: 0x094a, 0x1588: 0x0952, 0x1589: 0x0956, 0x158a: 0x0962, 0x158b: 0x0986, + 0x158c: 0x096a, 0x158d: 0x098a, 0x158e: 0x096e, 0x158f: 0x0976, 0x1590: 0x080e, 0x1591: 0x09d2, + 0x1592: 0x099a, 0x1593: 0x099e, 0x1594: 0x09a2, 0x1595: 0x0996, 0x1596: 0x09aa, 0x1597: 0x09a6, + 0x1598: 0x09be, 0x1599: 0x1677, 0x159a: 0x09da, 0x159b: 0x09de, 0x159c: 0x09e6, 0x159d: 0x09f2, + 0x159e: 0x09fa, 0x159f: 0x0a16, 0x15a0: 0x167c, 0x15a1: 0x1681, 0x15a2: 0x0a22, 0x15a3: 0x0a26, + 0x15a4: 0x0a2a, 0x15a5: 0x0a1e, 0x15a6: 0x0a32, 0x15a7: 0x05ca, 0x15a8: 0x05ce, 0x15a9: 0x0a3a, + 0x15aa: 0x0a42, 0x15ab: 0x0a42, 0x15ac: 0x1686, 0x15ad: 0x0a5e, 0x15ae: 0x0a62, 0x15af: 0x0a66, + 0x15b0: 0x0a6e, 0x15b1: 0x168b, 0x15b2: 0x0a76, 0x15b3: 0x0a7a, 0x15b4: 0x0b52, 0x15b5: 0x0a82, + 0x15b6: 0x05d2, 0x15b7: 0x0a8e, 0x15b8: 0x0a9e, 0x15b9: 0x0aaa, 0x15ba: 0x0aa6, 0x15bb: 0x1695, + 0x15bc: 0x0ab2, 0x15bd: 0x169a, 0x15be: 0x0abe, 0x15bf: 0x0aba, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0ac2, 0x15c1: 0x0ad2, 0x15c2: 0x0ad6, 0x15c3: 0x05d6, 0x15c4: 0x0ae6, 0x15c5: 0x0aee, + 0x15c6: 0x0af2, 0x15c7: 0x0af6, 0x15c8: 0x05da, 0x15c9: 0x169f, 0x15ca: 0x05de, 0x15cb: 0x0b12, + 0x15cc: 0x0b16, 0x15cd: 0x0b1a, 0x15ce: 0x0b22, 0x15cf: 0x1866, 0x15d0: 0x0b3a, 0x15d1: 0x16a9, + 0x15d2: 0x16a9, 0x15d3: 0x11da, 0x15d4: 0x0b4a, 0x15d5: 0x0b4a, 0x15d6: 0x05e2, 0x15d7: 0x16cc, + 0x15d8: 0x179e, 0x15d9: 0x0b5a, 0x15da: 0x0b62, 0x15db: 0x05e6, 0x15dc: 0x0b76, 0x15dd: 0x0b86, + 0x15de: 0x0b8a, 0x15df: 0x0b92, 0x15e0: 0x0ba2, 0x15e1: 0x05ee, 0x15e2: 0x05ea, 0x15e3: 0x0ba6, + 0x15e4: 0x16ae, 0x15e5: 0x0baa, 0x15e6: 0x0bbe, 0x15e7: 0x0bc2, 0x15e8: 0x0bc6, 0x15e9: 0x0bc2, + 0x15ea: 0x0bd2, 0x15eb: 0x0bd6, 0x15ec: 0x0be6, 0x15ed: 0x0bde, 0x15ee: 0x0be2, 0x15ef: 0x0bea, + 0x15f0: 0x0bee, 0x15f1: 0x0bf2, 0x15f2: 0x0bfe, 0x15f3: 0x0c02, 0x15f4: 0x0c1a, 0x15f5: 0x0c22, + 0x15f6: 0x0c32, 0x15f7: 0x0c46, 0x15f8: 0x16bd, 0x15f9: 0x0c42, 0x15fa: 0x0c36, 0x15fb: 0x0c4e, + 0x15fc: 0x0c56, 0x15fd: 0x0c6a, 0x15fe: 0x16c2, 0x15ff: 0x0c72, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c66, 0x1601: 0x0c5e, 0x1602: 0x05f2, 0x1603: 0x0c7a, 0x1604: 0x0c82, 0x1605: 0x0c8a, + 0x1606: 0x0c7e, 0x1607: 0x05f6, 0x1608: 0x0c9a, 0x1609: 0x0ca2, 0x160a: 0x16c7, 0x160b: 0x0cce, + 0x160c: 0x0d02, 0x160d: 0x0cde, 0x160e: 0x0602, 0x160f: 0x0cea, 0x1610: 0x05fe, 0x1611: 0x05fa, + 0x1612: 0x07c6, 0x1613: 0x07ca, 0x1614: 0x0d06, 0x1615: 0x0cee, 0x1616: 0x11ae, 0x1617: 0x0666, + 0x1618: 0x0d12, 0x1619: 0x0d16, 0x161a: 0x0d1a, 0x161b: 0x0d2e, 0x161c: 0x0d26, 0x161d: 0x16e0, + 0x161e: 0x0606, 0x161f: 0x0d42, 0x1620: 0x0d36, 0x1621: 0x0d52, 0x1622: 0x0d5a, 0x1623: 0x16ea, + 0x1624: 0x0d5e, 0x1625: 0x0d4a, 0x1626: 0x0d66, 0x1627: 0x060a, 0x1628: 0x0d6a, 0x1629: 0x0d6e, + 0x162a: 0x0d72, 0x162b: 0x0d7e, 0x162c: 
0x16ef, 0x162d: 0x0d86, 0x162e: 0x060e, 0x162f: 0x0d92, + 0x1630: 0x16f4, 0x1631: 0x0d96, 0x1632: 0x0612, 0x1633: 0x0da2, 0x1634: 0x0dae, 0x1635: 0x0dba, + 0x1636: 0x0dbe, 0x1637: 0x16f9, 0x1638: 0x1690, 0x1639: 0x16fe, 0x163a: 0x0dde, 0x163b: 0x1703, + 0x163c: 0x0dea, 0x163d: 0x0df2, 0x163e: 0x0de2, 0x163f: 0x0dfe, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0e, 0x1641: 0x0e1e, 0x1642: 0x0e12, 0x1643: 0x0e16, 0x1644: 0x0e22, 0x1645: 0x0e26, + 0x1646: 0x1708, 0x1647: 0x0e0a, 0x1648: 0x0e3e, 0x1649: 0x0e42, 0x164a: 0x0616, 0x164b: 0x0e56, + 0x164c: 0x0e52, 0x164d: 0x170d, 0x164e: 0x0e36, 0x164f: 0x0e72, 0x1650: 0x1712, 0x1651: 0x1717, + 0x1652: 0x0e76, 0x1653: 0x0e8a, 0x1654: 0x0e86, 0x1655: 0x0e82, 0x1656: 0x061a, 0x1657: 0x0e8e, + 0x1658: 0x0e9e, 0x1659: 0x0e9a, 0x165a: 0x0ea6, 0x165b: 0x1654, 0x165c: 0x0eb6, 0x165d: 0x171c, + 0x165e: 0x0ec2, 0x165f: 0x1726, 0x1660: 0x0ed6, 0x1661: 0x0ee2, 0x1662: 0x0ef6, 0x1663: 0x172b, + 0x1664: 0x0f0a, 0x1665: 0x0f0e, 0x1666: 0x1730, 0x1667: 0x1735, 0x1668: 0x0f2a, 0x1669: 0x0f3a, + 0x166a: 0x061e, 0x166b: 0x0f3e, 0x166c: 0x0622, 0x166d: 0x0622, 0x166e: 0x0f56, 0x166f: 0x0f5a, + 0x1670: 0x0f62, 0x1671: 0x0f66, 0x1672: 0x0f72, 0x1673: 0x0626, 0x1674: 0x0f8a, 0x1675: 0x173a, + 0x1676: 0x0fa6, 0x1677: 0x173f, 0x1678: 0x0fb2, 0x1679: 0x16a4, 0x167a: 0x0fc2, 0x167b: 0x1744, + 0x167c: 0x1749, 0x167d: 0x174e, 0x167e: 0x062a, 0x167f: 0x062e, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ffa, 0x1681: 0x1758, 0x1682: 0x1753, 0x1683: 0x175d, 0x1684: 0x1762, 0x1685: 0x1002, + 0x1686: 0x1006, 0x1687: 0x1006, 0x1688: 0x100e, 0x1689: 0x0636, 0x168a: 0x1012, 0x168b: 0x063a, + 0x168c: 0x063e, 0x168d: 0x176c, 0x168e: 0x1026, 0x168f: 0x102e, 0x1690: 0x103a, 0x1691: 0x0642, + 0x1692: 0x1771, 0x1693: 0x105e, 0x1694: 0x1776, 0x1695: 0x177b, 0x1696: 0x107e, 0x1697: 0x1096, + 0x1698: 0x0646, 0x1699: 0x109e, 0x169a: 0x10a2, 0x169b: 0x10a6, 0x169c: 0x1780, 0x169d: 0x1785, + 0x169e: 0x1785, 0x169f: 0x10be, 0x16a0: 0x064a, 0x16a1: 0x178a, 0x16a2: 0x10d2, 0x16a3: 0x10d6, + 0x16a4: 0x064e, 0x16a5: 0x178f, 0x16a6: 0x10f2, 0x16a7: 0x0652, 0x16a8: 0x1102, 0x16a9: 0x10fa, + 0x16aa: 0x110a, 0x16ab: 0x1799, 0x16ac: 0x1122, 0x16ad: 0x0656, 0x16ae: 0x112e, 0x16af: 0x1136, + 0x16b0: 0x1146, 0x16b1: 0x065a, 0x16b2: 0x17a3, 0x16b3: 0x17a8, 0x16b4: 0x065e, 0x16b5: 0x17ad, + 0x16b6: 0x115e, 0x16b7: 0x17b2, 0x16b8: 0x116a, 0x16b9: 0x1176, 0x16ba: 0x117e, 0x16bb: 0x17b7, + 0x16bc: 0x17bc, 0x16bd: 0x1192, 0x16be: 0x17c1, 0x16bf: 0x119a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16d1, 0x16c1: 0x0662, 0x16c2: 0x11b2, 0x16c3: 0x11b6, 0x16c4: 0x066a, 0x16c5: 0x11ba, + 0x16c6: 0x0a36, 0x16c7: 0x17c6, 0x16c8: 0x17cb, 0x16c9: 0x16d6, 0x16ca: 0x16db, 0x16cb: 0x11da, + 0x16cc: 0x11de, 0x16cd: 0x13f6, 0x16ce: 0x066e, 0x16cf: 0x120a, 0x16d0: 0x1206, 0x16d1: 0x120e, + 0x16d2: 0x0842, 0x16d3: 0x1212, 0x16d4: 0x1216, 0x16d5: 0x121a, 0x16d6: 0x1222, 0x16d7: 0x17d0, + 0x16d8: 0x121e, 0x16d9: 0x1226, 0x16da: 0x123a, 0x16db: 0x123e, 0x16dc: 0x122a, 0x16dd: 0x1242, + 0x16de: 0x1256, 0x16df: 0x126a, 0x16e0: 0x1236, 0x16e1: 0x124a, 0x16e2: 0x124e, 0x16e3: 0x1252, + 0x16e4: 0x17d5, 0x16e5: 0x17df, 0x16e6: 0x17da, 0x16e7: 0x0672, 0x16e8: 0x1272, 0x16e9: 0x1276, + 0x16ea: 0x127e, 0x16eb: 0x17f3, 0x16ec: 0x1282, 0x16ed: 0x17e4, 0x16ee: 0x0676, 0x16ef: 0x067a, + 0x16f0: 0x17e9, 0x16f1: 0x17ee, 0x16f2: 0x067e, 0x16f3: 0x12a2, 0x16f4: 0x12a6, 0x16f5: 0x12aa, + 0x16f6: 0x12ae, 0x16f7: 0x12ba, 0x16f8: 0x12b6, 0x16f9: 0x12c2, 0x16fa: 0x12be, 0x16fb: 0x12ce, + 0x16fc: 0x12c6, 0x16fd: 0x12ca, 0x16fe: 0x12d2, 0x16ff: 0x0682, + // 
Block 0x5c, offset 0x1700 + 0x1700: 0x12da, 0x1701: 0x12de, 0x1702: 0x0686, 0x1703: 0x12ee, 0x1704: 0x12f2, 0x1705: 0x17f8, + 0x1706: 0x12fe, 0x1707: 0x1302, 0x1708: 0x068a, 0x1709: 0x130e, 0x170a: 0x05be, 0x170b: 0x17fd, + 0x170c: 0x1802, 0x170d: 0x068e, 0x170e: 0x0692, 0x170f: 0x133a, 0x1710: 0x1352, 0x1711: 0x136e, + 0x1712: 0x137e, 0x1713: 0x1807, 0x1714: 0x1392, 0x1715: 0x1396, 0x1716: 0x13ae, 0x1717: 0x13ba, + 0x1718: 0x1811, 0x1719: 0x1663, 0x171a: 0x13c6, 0x171b: 0x13c2, 0x171c: 0x13ce, 0x171d: 0x1668, + 0x171e: 0x13da, 0x171f: 0x13e6, 0x1720: 0x1816, 0x1721: 0x181b, 0x1722: 0x1426, 0x1723: 0x1432, + 0x1724: 0x143a, 0x1725: 0x1820, 0x1726: 0x143e, 0x1727: 0x146a, 0x1728: 0x1476, 0x1729: 0x147a, + 0x172a: 0x1472, 0x172b: 0x1486, 0x172c: 0x148a, 0x172d: 0x1825, 0x172e: 0x1496, 0x172f: 0x0696, + 0x1730: 0x149e, 0x1731: 0x182a, 0x1732: 0x069a, 0x1733: 0x14d6, 0x1734: 0x0ac6, 0x1735: 0x14ee, + 0x1736: 0x182f, 0x1737: 0x1839, 0x1738: 0x069e, 0x1739: 0x06a2, 0x173a: 0x1516, 0x173b: 0x183e, + 0x173c: 0x06a6, 0x173d: 0x1843, 0x173e: 0x152e, 0x173f: 0x152e, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1536, 0x1741: 0x1848, 0x1742: 0x154e, 0x1743: 0x06aa, 0x1744: 0x155e, 0x1745: 0x156a, + 0x1746: 0x1572, 0x1747: 0x157a, 0x1748: 0x06ae, 0x1749: 0x184d, 0x174a: 0x158e, 0x174b: 0x15aa, + 0x174c: 0x15b6, 0x174d: 0x06b2, 0x174e: 0x06b6, 0x174f: 0x15ba, 0x1750: 0x1852, 0x1751: 0x06ba, + 0x1752: 0x1857, 0x1753: 0x185c, 0x1754: 0x1861, 0x1755: 0x15de, 0x1756: 0x06be, 0x1757: 0x15f2, + 0x1758: 0x15fa, 0x1759: 0x15fe, 0x175a: 0x1606, 0x175b: 0x160e, 0x175c: 0x1616, 0x175d: 0x186b, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16b: 0x92, 0x16c: 0x0f, 0x16d: 0x93, 0x16e: 0x94, 0x16f: 0x95, + 0x170: 0x96, 0x173: 0x97, 0x174: 0x98, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x99, 0x181: 0x9a, 0x182: 0x9b, 0x183: 0x9c, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9d, 0x187: 0x9e, + 0x188: 0x9f, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0xa0, 0x18c: 0xa1, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa2, + 0x1a8: 0xa3, 0x1a9: 0xa4, 0x1ab: 0xa5, + 0x1b1: 0xa6, 0x1b3: 0xa7, 0x1b5: 0xa8, 0x1b7: 0xa9, + 0x1ba: 0xaa, 0x1bb: 0xab, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 
0x23, 0x1bf: 0xac, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xad, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xae, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xaf, 0x21a: 0xb0, 0x21b: 0xb1, 0x21d: 0xb2, 0x21f: 0xb3, + 0x220: 0xb4, 0x223: 0xb5, 0x224: 0xb6, 0x225: 0xb7, 0x226: 0xb8, 0x227: 0xb9, + 0x22a: 0xba, 0x22b: 0xbb, 0x22d: 0xbc, 0x22f: 0xbd, + 0x230: 0xbe, 0x231: 0xbf, 0x232: 0xc0, 0x233: 0xc1, 0x234: 0xc2, 0x235: 0xc3, 0x236: 0xc4, 0x237: 0xbe, + 0x238: 0xbf, 0x239: 0xc0, 0x23a: 0xc1, 0x23b: 0xc2, 0x23c: 0xc3, 0x23d: 0xc4, 0x23e: 0xbe, 0x23f: 0xbf, + // Block 0x9, offset 0x240 + 0x240: 0xc0, 0x241: 0xc1, 0x242: 0xc2, 0x243: 0xc3, 0x244: 0xc4, 0x245: 0xbe, 0x246: 0xbf, 0x247: 0xc0, + 0x248: 0xc1, 0x249: 0xc2, 0x24a: 0xc3, 0x24b: 0xc4, 0x24c: 0xbe, 0x24d: 0xbf, 0x24e: 0xc0, 0x24f: 0xc1, + 0x250: 0xc2, 0x251: 0xc3, 0x252: 0xc4, 0x253: 0xbe, 0x254: 0xbf, 0x255: 0xc0, 0x256: 0xc1, 0x257: 0xc2, + 0x258: 0xc3, 0x259: 0xc4, 0x25a: 0xbe, 0x25b: 0xbf, 0x25c: 0xc0, 0x25d: 0xc1, 0x25e: 0xc2, 0x25f: 0xc3, + 0x260: 0xc4, 0x261: 0xbe, 0x262: 0xbf, 0x263: 0xc0, 0x264: 0xc1, 0x265: 0xc2, 0x266: 0xc3, 0x267: 0xc4, + 0x268: 0xbe, 0x269: 0xbf, 0x26a: 0xc0, 0x26b: 0xc1, 0x26c: 0xc2, 0x26d: 0xc3, 0x26e: 0xc4, 0x26f: 0xbe, + 0x270: 0xbf, 0x271: 0xc0, 0x272: 0xc1, 0x273: 0xc2, 0x274: 0xc3, 0x275: 0xc4, 0x276: 0xbe, 0x277: 0xbf, + 0x278: 0xc0, 0x279: 0xc1, 0x27a: 0xc2, 0x27b: 0xc3, 0x27c: 0xc4, 0x27d: 0xbe, 0x27e: 0xbf, 0x27f: 0xc0, + // Block 0xa, offset 0x280 + 0x280: 0xc1, 0x281: 0xc2, 0x282: 0xc3, 0x283: 0xc4, 0x284: 0xbe, 0x285: 0xbf, 0x286: 0xc0, 0x287: 0xc1, + 0x288: 0xc2, 0x289: 0xc3, 0x28a: 0xc4, 0x28b: 0xbe, 0x28c: 0xbf, 0x28d: 0xc0, 0x28e: 0xc1, 0x28f: 0xc2, + 0x290: 0xc3, 0x291: 0xc4, 0x292: 0xbe, 0x293: 0xbf, 0x294: 0xc0, 0x295: 0xc1, 0x296: 0xc2, 0x297: 0xc3, + 0x298: 0xc4, 0x299: 0xbe, 0x29a: 0xbf, 0x29b: 0xc0, 0x29c: 0xc1, 0x29d: 0xc2, 0x29e: 0xc3, 0x29f: 0xc4, + 0x2a0: 0xbe, 0x2a1: 0xbf, 0x2a2: 0xc0, 0x2a3: 0xc1, 0x2a4: 0xc2, 0x2a5: 0xc3, 0x2a6: 0xc4, 0x2a7: 0xbe, + 0x2a8: 0xbf, 0x2a9: 0xc0, 0x2aa: 0xc1, 0x2ab: 0xc2, 0x2ac: 0xc3, 0x2ad: 0xc4, 0x2ae: 0xbe, 0x2af: 0xbf, + 0x2b0: 0xc0, 0x2b1: 0xc1, 0x2b2: 0xc2, 0x2b3: 0xc3, 0x2b4: 0xc4, 0x2b5: 0xbe, 0x2b6: 0xbf, 0x2b7: 0xc0, + 0x2b8: 0xc1, 0x2b9: 0xc2, 0x2ba: 0xc3, 0x2bb: 0xc4, 0x2bc: 0xbe, 0x2bd: 0xbf, 0x2be: 0xc0, 0x2bf: 0xc1, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc2, 0x2c1: 0xc3, 0x2c2: 0xc4, 0x2c3: 0xbe, 0x2c4: 0xbf, 0x2c5: 0xc0, 0x2c6: 0xc1, 0x2c7: 0xc2, + 0x2c8: 0xc3, 0x2c9: 0xc4, 0x2ca: 0xbe, 0x2cb: 0xbf, 0x2cc: 0xc0, 0x2cd: 0xc1, 0x2ce: 0xc2, 0x2cf: 0xc3, + 0x2d0: 0xc4, 0x2d1: 0xbe, 0x2d2: 0xbf, 0x2d3: 0xc0, 0x2d4: 0xc1, 0x2d5: 0xc2, 0x2d6: 0xc3, 0x2d7: 0xc4, + 0x2d8: 0xbe, 0x2d9: 0xbf, 0x2da: 0xc0, 0x2db: 0xc1, 0x2dc: 0xc2, 0x2dd: 0xc3, 0x2de: 0xc5, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc6, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc7, + 0x34b: 0xc8, 0x34d: 0xc9, + 0x368: 0xca, 0x36b: 0xcb, + 0x374: 0xcc, + 0x37a: 0xcd, 0x37d: 0xce, + // Block 0xe, offset 0x380 + 0x381: 0xcf, 0x382: 0xd0, 0x384: 0xd1, 0x385: 0xb8, 0x387: 0xd2, + 0x388: 0xd3, 0x38b: 0xd4, 
0x38c: 0xd5, 0x38d: 0xd6, + 0x391: 0xd7, 0x392: 0xd8, 0x393: 0xd9, 0x396: 0xda, 0x397: 0xdb, + 0x398: 0xdc, 0x39a: 0xdd, 0x39c: 0xde, + 0x3a0: 0xdf, 0x3a4: 0xe0, 0x3a5: 0xe1, 0x3a7: 0xe2, + 0x3a8: 0xe3, 0x3a9: 0xe4, 0x3aa: 0xe5, + 0x3b0: 0xdc, 0x3b5: 0xe6, 0x3b6: 0xe7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe8, 0x3ec: 0xe9, + 0x3ff: 0xea, + // Block 0x10, offset 0x400 + 0x432: 0xeb, + // Block 0x11, offset 0x440 + 0x445: 0xec, 0x446: 0xed, 0x447: 0xee, + 0x449: 0xef, + 0x450: 0xf0, 0x451: 0xf1, 0x452: 0xf2, 0x453: 0xf3, 0x454: 0xf4, 0x455: 0xf5, 0x456: 0xf6, 0x457: 0xf7, + 0x458: 0xf8, 0x459: 0xf9, 0x45a: 0x4c, 0x45b: 0xfa, 0x45c: 0xfb, 0x45d: 0xfc, 0x45e: 0xfd, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xfe, 0x484: 0xe9, + 0x48b: 0xff, + 0x4a3: 0x100, 0x4a5: 0x101, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0x102, 0x4c6: 0x103, + 0x4c8: 0x52, 0x4c9: 0x104, + 0x4ef: 0x105, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 170 entries, 340 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xdf, 0xe3, 0xe9, 0xfa, 0x106, 0x108, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x118, 0x11a, 0x11c, 0x11f, 0x122, 0x124, 0x127, 0x12a, 0x12e, 0x134, 0x136, 0x13f, 0x141, 0x144, 0x146, 0x151, 0x15c, 0x16a, 0x178, 0x188, 0x196, 0x19d, 0x1a3, 0x1b2, 0x1b6, 0x1b8, 0x1bc, 0x1be, 0x1c1, 0x1c3, 0x1c6, 0x1c8, 0x1cb, 0x1cd, 0x1cf, 0x1d1, 0x1dd, 0x1e7, 0x1f1, 0x1f4, 0x1f8, 0x1fa, 0x1fc, 0x1fe, 0x201, 0x204, 0x206, 0x208, 0x20a, 0x20c, 0x212, 0x215, 0x21a, 0x21c, 0x223, 0x229, 0x22f, 0x237, 0x23d, 0x243, 0x249, 0x24d, 0x24f, 0x251, 0x253, 0x255, 0x25b, 0x25e, 0x260, 0x262, 0x268, 0x26b, 0x273, 0x27a, 0x27d, 0x280, 0x282, 0x285, 0x28d, 0x291, 0x298, 0x29b, 0x2a1, 0x2a3, 0x2a5, 0x2a8, 0x2aa, 0x2ad, 0x2b2, 0x2b4, 0x2b6, 0x2b8, 0x2ba, 0x2bc, 0x2bf, 0x2c1, 0x2c3, 0x2c5, 0x2c7, 0x2c9, 0x2d6, 0x2e0, 0x2e2, 0x2e4, 0x2e8, 0x2ed, 0x2f9, 0x2fe, 0x307, 0x30d, 0x312, 0x316, 0x31b, 0x31f, 0x32f, 0x33d, 0x34b, 0x359, 0x35f, 0x361, 0x363, 0x366, 0x371, 0x373, 0x37d} + +// nfkcSparseValues: 895 entries, 3580 bytes +var nfkcSparseValues = [895]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x428f, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x427b, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x4271, lo: 0xb4, hi: 0xb4}, + {value: 0x01df, lo: 0xb5, hi: 0xb5}, + {value: 0x42a8, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x2222, lo: 0xbc, hi: 0xbc}, + {value: 0x2216, lo: 0xbd, hi: 0xbd}, + {value: 0x22b8, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46f9, lo: 0xa0, hi: 0xa1}, + {value: 0x472b, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + 
{value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x4285, lo: 0x98, hi: 0x98}, + {value: 0x428a, lo: 0x99, hi: 0x9a}, + {value: 0x42ad, lo: 0x9b, hi: 0x9b}, + {value: 0x4276, lo: 0x9c, hi: 0x9c}, + {value: 0x4299, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x016a, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37bc, lo: 0x90, hi: 0x90}, + {value: 0x37c8, lo: 0x91, hi: 0x91}, + {value: 0x37b6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x382e, lo: 0x97, hi: 0x97}, + {value: 0x37f8, lo: 0x9c, hi: 0x9c}, + {value: 0x37e0, lo: 0x9d, hi: 0x9d}, + {value: 0x380a, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3834, lo: 0xb6, hi: 0xb6}, + {value: 0x383a, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3858, lo: 0xa2, hi: 0xa2}, + {value: 0x385e, lo: 0xa3, hi: 0xa3}, + {value: 0x386a, lo: 0xa4, hi: 0xa4}, + {value: 0x3864, lo: 0xa5, hi: 0xa5}, + {value: 0x3870, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x3882, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3876, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x387c, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + 
{value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3eef, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ef7, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eff, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x4533, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cab, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x4573, lo: 0x9c, hi: 0x9d}, + {value: 0x4583, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x45ab, lo: 0xb3, hi: 0xb3}, + {value: 0x45b3, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x458b, lo: 0x99, hi: 0x9b}, + {value: 0x45a3, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cc3, lo: 0x88, hi: 0x88}, + {value: 0x2cbb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ccb, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45bb, lo: 0x9c, hi: 0x9c}, + {value: 0x45c3, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cd3, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cdb, lo: 0x8a, hi: 0x8a}, + {value: 0x2ceb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ce3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3f07, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2cf3, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cfb, lo: 0x87, hi: 0x87}, + {value: 0x2d03, lo: 0x88, hi: 0x88}, + {value: 0x2f67, lo: 0x8a, hi: 0x8a}, + {value: 0x2def, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, 
lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d0b, lo: 0x8a, hi: 0x8a}, + {value: 0x2d1b, lo: 0x8b, hi: 0x8b}, + {value: 0x2d13, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + {value: 0x6bdd, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3f0f, lo: 0x9a, hi: 0x9a}, + {value: 0x2f6f, lo: 0x9c, hi: 0x9c}, + {value: 0x2dfa, lo: 0x9d, hi: 0x9d}, + {value: 0x2d23, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2627, lo: 0xb3, hi: 0xb3}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x03}, + {value: 0x263c, lo: 0xb3, hi: 0xb3}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xdf + {value: 0x0000, lo: 0x03}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + {value: 0x262e, lo: 0x9c, hi: 0x9c}, + {value: 0x2635, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe3 + {value: 0x0000, lo: 0x05}, + {value: 0x030e, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe9 + {value: 0x0000, lo: 0x10}, + {value: 0x264a, lo: 0x83, hi: 0x83}, + {value: 0x2651, lo: 0x8d, hi: 0x8d}, + {value: 0x2658, lo: 0x92, hi: 0x92}, + {value: 0x265f, lo: 0x97, hi: 0x97}, + {value: 0x2666, lo: 0x9c, hi: 0x9c}, + {value: 0x2643, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4a9b, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4aa4, lo: 0xb5, hi: 0xb5}, + {value: 0x45cb, lo: 0xb6, hi: 0xb6}, + {value: 0x460b, lo: 0xb7, hi: 0xb7}, + {value: 0x45d3, lo: 0xb8, hi: 0xb8}, + {value: 0x4616, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xfa + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4aad, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x2674, lo: 0x93, hi: 0x93}, + {value: 0x267b, lo: 0x9d, hi: 0x9d}, + {value: 0x2682, lo: 0xa2, hi: 0xa2}, + {value: 0x2689, lo: 0xa7, hi: 0xa7}, + {value: 0x2690, lo: 0xac, hi: 0xac}, + {value: 0x266d, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x108 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d2b, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x0312, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 
0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x116 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x118 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x11a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x94}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11f + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x122 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x124 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x127 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x12a + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12e + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x134 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + // Block 0x37, offset 0x136 + {value: 0x0000, lo: 0x08}, + {value: 0x2d73, lo: 0x80, hi: 0x80}, + {value: 0x2d7b, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d83, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x13f + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x141 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x144 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x146 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x151 + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x15c + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043e, lo: 0x91, hi: 0x91}, + {value: 0x42b2, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1876, lo: 0xa5, hi: 0xa5}, + {value: 0x1b62, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2697, lo: 0xb3, hi: 0xb3}, + {value: 0x280b, lo: 0xb4, hi: 0xb4}, + {value: 0x269e, lo: 0xb6, hi: 0xb6}, + {value: 0x2815, 
lo: 0xb7, hi: 0xb7}, + {value: 0x1870, lo: 0xbc, hi: 0xbc}, + {value: 0x4280, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, offset 0x16a + {value: 0x0002, lo: 0x0d}, + {value: 0x1936, lo: 0x87, hi: 0x87}, + {value: 0x1933, lo: 0x88, hi: 0x88}, + {value: 0x1873, lo: 0x89, hi: 0x89}, + {value: 0x299b, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x046a, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x3f, offset 0x178 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x046a, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x199f, lo: 0xa8, hi: 0xa8}, + // Block 0x40, offset 0x188 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x41, offset 0x196 + {value: 0x0007, lo: 0x06}, + {value: 0x2186, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bd0, lo: 0x9a, hi: 0x9b}, + {value: 0x3bde, lo: 0xae, hi: 0xae}, + // Block 0x42, offset 0x19d + {value: 0x000e, lo: 0x05}, + {value: 0x3be5, lo: 0x8d, hi: 0x8e}, + {value: 0x3bec, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x43, offset 0x1a3 + {value: 0x017a, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3bfa, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3c01, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3c08, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3c0f, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3c16, lo: 0xa6, hi: 0xa6}, + {value: 0x26a5, lo: 0xac, hi: 0xad}, + {value: 0x26ac, lo: 0xaf, hi: 0xaf}, + {value: 0x2829, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x44, offset 0x1b2 + {value: 0x0007, lo: 0x03}, + {value: 0x3c7f, lo: 0xa0, hi: 0xa1}, + {value: 0x3ca9, lo: 0xa2, hi: 0xa3}, + {value: 0x3cd3, lo: 0xaa, hi: 0xad}, + // Block 0x45, offset 0x1b6 + {value: 0x0004, lo: 0x01}, + {value: 0x048e, lo: 0xa9, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x47, offset 0x1bc + {value: 0x0000, lo: 0x01}, + {value: 0x29a8, 
lo: 0x8c, hi: 0x8c}, + // Block 0x48, offset 0x1be + {value: 0x0266, lo: 0x02}, + {value: 0x1b92, lo: 0xb4, hi: 0xb4}, + {value: 0x1930, lo: 0xb5, hi: 0xb6}, + // Block 0x49, offset 0x1c1 + {value: 0x0000, lo: 0x01}, + {value: 0x44f4, lo: 0x9c, hi: 0x9c}, + // Block 0x4a, offset 0x1c3 + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4b, offset 0x1c6 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x4c, offset 0x1c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0482, lo: 0xaf, hi: 0xaf}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x4d, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x4e, offset 0x1cd + {value: 0x0000, lo: 0x01}, + {value: 0x0dc6, lo: 0x9f, hi: 0x9f}, + // Block 0x4f, offset 0x1cf + {value: 0x0000, lo: 0x01}, + {value: 0x1632, lo: 0xb3, hi: 0xb3}, + // Block 0x50, offset 0x1d1 + {value: 0x0004, lo: 0x0b}, + {value: 0x159a, lo: 0x80, hi: 0x82}, + {value: 0x15b2, lo: 0x83, hi: 0x83}, + {value: 0x15ca, lo: 0x84, hi: 0x85}, + {value: 0x15da, lo: 0x86, hi: 0x89}, + {value: 0x15ee, lo: 0x8a, hi: 0x8c}, + {value: 0x1602, lo: 0x8d, hi: 0x8d}, + {value: 0x160a, lo: 0x8e, hi: 0x8e}, + {value: 0x1612, lo: 0x8f, hi: 0x90}, + {value: 0x161e, lo: 0x91, hi: 0x93}, + {value: 0x162e, lo: 0x94, hi: 0x94}, + {value: 0x1636, lo: 0x95, hi: 0x95}, + // Block 0x51, offset 0x1dd + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xae}, + {value: 0x8130, lo: 0xaf, hi: 0xaf}, + {value: 0x04b6, lo: 0xb6, hi: 0xb6}, + {value: 0x088a, lo: 0xb8, hi: 0xba}, + // Block 0x52, offset 0x1e7 + {value: 0x0006, lo: 0x09}, + {value: 0x0316, lo: 0xb1, hi: 0xb1}, + {value: 0x031a, lo: 0xb2, hi: 0xb2}, + {value: 0x4a52, lo: 0xb3, hi: 0xb3}, + {value: 0x031e, lo: 0xb4, hi: 0xb4}, + {value: 0x4a58, lo: 0xb5, hi: 0xb6}, + {value: 0x0322, lo: 0xb7, hi: 0xb7}, + {value: 0x0326, lo: 0xb8, hi: 0xb8}, + {value: 0x032a, lo: 0xb9, hi: 0xb9}, + {value: 0x4a64, lo: 0xba, hi: 0xbf}, + // Block 0x53, offset 0x1f1 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0212, lo: 0x9c, hi: 0x9c}, + {value: 0x0215, lo: 0x9d, hi: 0x9d}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x55, offset 0x1f8 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x56, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x163e, lo: 0xb0, hi: 0xb0}, + // Block 0x57, offset 0x1fc + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x58, offset 0x1fe + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x59, offset 0x201 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x5a, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x5b, offset 0x206 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x5c, offset 0x208 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x5d, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x5e, offset 0x20c + {value: 0x0000, lo: 
0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x5f, offset 0x212 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x60, offset 0x215 + {value: 0x0008, lo: 0x04}, + {value: 0x163a, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1646, lo: 0x9f, hi: 0x9f}, + {value: 0x015e, lo: 0xa9, hi: 0xa9}, + // Block 0x61, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x62, offset 0x21c + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x65, offset 0x22f + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x69, offset 0x249 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6a, offset 0x24d + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6b, offset 0x24f + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x6c, offset 0x251 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x6d, offset 0x253 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x6e, offset 0x255 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x25b + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x70, offset 0x25e + {value: 0x0000, 
lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x72, offset 0x262 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x73, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x26b + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x424f, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4259, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x4263, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x75, offset 0x273 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d8b, lo: 0xae, hi: 0xae}, + {value: 0x2d95, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x76, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x77, offset 0x27d + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x78, offset 0x280 + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x79, offset 0x282 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7a, offset 0x285 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d9f, lo: 0x8b, hi: 0x8b}, + {value: 0x2da9, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7b, offset 0x28d + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x7c, offset 0x291 + {value: 0x6b4d, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2dbd, lo: 0xbb, hi: 0xbb}, + {value: 0x2db3, lo: 0xbc, hi: 0xbd}, + {value: 0x2dc7, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x298 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dd1, lo: 0xba, hi: 0xba}, + {value: 0x2ddb, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x2a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x80, offset 0x2a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x2a5 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x82, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x83, offset 0x2aa + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x84, offset 0x2ad + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 
0xb5, hi: 0xb5}, + {value: 0x2de5, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x85, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x86, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x87, offset 0x2b6 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x88, offset 0x2b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x89, offset 0x2ba + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8a, offset 0x2bc + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8b, offset 0x2bf + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8c, offset 0x2c1 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8d, offset 0x2c3 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x8e, offset 0x2c5 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x8f, offset 0x2c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x90, offset 0x2c9 + {value: 0x0000, lo: 0x0c}, + {value: 0x45e3, lo: 0x9e, hi: 0x9e}, + {value: 0x45ed, lo: 0x9f, hi: 0x9f}, + {value: 0x4621, lo: 0xa0, hi: 0xa0}, + {value: 0x462f, lo: 0xa1, hi: 0xa1}, + {value: 0x463d, lo: 0xa2, hi: 0xa2}, + {value: 0x464b, lo: 0xa3, hi: 0xa3}, + {value: 0x4659, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x91, offset 0x2d6 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x45f7, lo: 0xbb, hi: 0xbb}, + {value: 0x4601, lo: 0xbc, hi: 0xbc}, + {value: 0x4667, lo: 0xbd, hi: 0xbd}, + {value: 0x4683, lo: 0xbe, hi: 0xbe}, + {value: 0x4675, lo: 0xbf, hi: 0xbf}, + // Block 0x92, offset 0x2e0 + {value: 0x0000, lo: 0x01}, + {value: 0x4691, lo: 0x80, hi: 0x80}, + // Block 0x93, offset 0x2e2 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x94, offset 0x2e4 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x95, offset 0x2e8 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x96, offset 0x2ed + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x97, offset 0x2f9 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x98, offset 0x2fe + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, 
lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x99, offset 0x307 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x9a, offset 0x30d + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x9b, offset 0x312 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x9c, offset 0x316 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x9d, offset 0x31b + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x31f + {value: 0x0003, lo: 0x0f}, + {value: 0x01bb, lo: 0x80, hi: 0x80}, + {value: 0x0462, lo: 0x81, hi: 0x81}, + {value: 0x01be, lo: 0x82, hi: 0x9a}, + {value: 0x045e, lo: 0x9b, hi: 0x9b}, + {value: 0x01ca, lo: 0x9c, hi: 0x9c}, + {value: 0x01d3, lo: 0x9d, hi: 0x9d}, + {value: 0x01d9, lo: 0x9e, hi: 0x9e}, + {value: 0x01fd, lo: 0x9f, hi: 0x9f}, + {value: 0x01ee, lo: 0xa0, hi: 0xa0}, + {value: 0x01eb, lo: 0xa1, hi: 0xa1}, + {value: 0x0176, lo: 0xa2, hi: 0xb2}, + {value: 0x018b, lo: 0xb3, hi: 0xb3}, + {value: 0x01a9, lo: 0xb4, hi: 0xba}, + {value: 0x0462, lo: 0xbb, hi: 0xbb}, + {value: 0x01be, lo: 0xbc, hi: 0xbf}, + // Block 0x9f, offset 0x32f + {value: 0x0003, lo: 0x0d}, + {value: 0x01ca, lo: 0x80, hi: 0x94}, + {value: 0x045e, lo: 0x95, hi: 0x95}, + {value: 0x01ca, lo: 0x96, hi: 0x96}, + {value: 0x01d3, lo: 0x97, hi: 0x97}, + {value: 0x01d9, lo: 0x98, hi: 0x98}, + {value: 0x01fd, lo: 0x99, hi: 0x99}, + {value: 0x01ee, lo: 0x9a, hi: 0x9a}, + {value: 0x01eb, lo: 0x9b, hi: 0x9b}, + {value: 0x0176, lo: 0x9c, hi: 0xac}, + {value: 0x018b, lo: 0xad, hi: 0xad}, + {value: 0x01a9, lo: 0xae, hi: 0xb4}, + {value: 0x0462, lo: 0xb5, hi: 0xb5}, + {value: 0x01be, lo: 0xb6, hi: 0xbf}, + // Block 0xa0, offset 0x33d + {value: 0x0003, lo: 0x0d}, + {value: 0x01dc, lo: 0x80, hi: 0x8e}, + {value: 0x045e, lo: 0x8f, hi: 0x8f}, + {value: 0x01ca, lo: 0x90, hi: 0x90}, + {value: 0x01d3, lo: 0x91, hi: 0x91}, + {value: 0x01d9, lo: 0x92, hi: 0x92}, + {value: 0x01fd, lo: 0x93, hi: 0x93}, + {value: 0x01ee, lo: 0x94, hi: 0x94}, + {value: 0x01eb, lo: 0x95, hi: 0x95}, + {value: 0x0176, lo: 0x96, hi: 0xa6}, + {value: 0x018b, lo: 0xa7, hi: 0xa7}, + {value: 0x01a9, lo: 0xa8, hi: 0xae}, + {value: 0x0462, lo: 0xaf, hi: 0xaf}, + {value: 0x01be, lo: 0xb0, hi: 0xbf}, + // Block 0xa1, offset 0x34b + {value: 0x0003, lo: 0x0d}, + {value: 0x01ee, lo: 0x80, hi: 0x88}, + {value: 0x045e, lo: 0x89, hi: 0x89}, + {value: 0x01ca, lo: 0x8a, hi: 0x8a}, + {value: 0x01d3, lo: 0x8b, hi: 0x8b}, + {value: 0x01d9, lo: 0x8c, hi: 0x8c}, + {value: 0x01fd, lo: 0x8d, hi: 0x8d}, + {value: 0x01ee, lo: 0x8e, hi: 0x8e}, + {value: 0x01eb, lo: 0x8f, hi: 0x8f}, + {value: 0x0176, lo: 0x90, hi: 0xa0}, + {value: 0x018b, lo: 0xa1, hi: 0xa1}, + {value: 0x01a9, lo: 0xa2, hi: 0xa8}, 
+ {value: 0x0462, lo: 0xa9, hi: 0xa9}, + {value: 0x01be, lo: 0xaa, hi: 0xbf}, + // Block 0xa2, offset 0x359 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0xa3, offset 0x35f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0xa4, offset 0x361 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xa5, offset 0x363 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xa6, offset 0x366 + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1954, lo: 0x8a, hi: 0x8a}, + {value: 0x1987, lo: 0x8b, hi: 0x8b}, + {value: 0x19a2, lo: 0x8c, hi: 0x8c}, + {value: 0x19a8, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc6, lo: 0x8e, hi: 0x8e}, + {value: 0x19b4, lo: 0x8f, hi: 0x8f}, + {value: 0x197e, lo: 0xaa, hi: 0xaa}, + {value: 0x1981, lo: 0xab, hi: 0xab}, + {value: 0x1984, lo: 0xac, hi: 0xac}, + // Block 0xa7, offset 0x371 + {value: 0x0000, lo: 0x01}, + {value: 0x1942, lo: 0x90, hi: 0x90}, + // Block 0xa8, offset 0x373 + {value: 0x0028, lo: 0x09}, + {value: 0x286f, lo: 0x80, hi: 0x80}, + {value: 0x2833, lo: 0x81, hi: 0x81}, + {value: 0x283d, lo: 0x82, hi: 0x82}, + {value: 0x2851, lo: 0x83, hi: 0x84}, + {value: 0x285b, lo: 0x85, hi: 0x86}, + {value: 0x2847, lo: 0x87, hi: 0x87}, + {value: 0x2865, lo: 0x88, hi: 0x88}, + {value: 0x0b72, lo: 0x90, hi: 0x90}, + {value: 0x08ea, lo: 0x91, hi: 0x91}, + // Block 0xa9, offset 0x37d + {value: 0x0002, lo: 0x01}, + {value: 0x0021, lo: 0xb0, hi: 0xb9}, +} + +// recompMap: 7528 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + 
// 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" 
+ // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." + // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + 
"\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + 
"\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "\x195\x190\x00\x01\x198" + // 0x19351930: 0x00011938 + "" + // Total size of tables: 55KB (56160 bytes) diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 5c859677a7981..543942b9e781b 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.14
+// +build go1.14,!go1.16
 
 package width
 
diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go
new file mode 100644
index 0000000000000..804264ca67d1c
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables13.0.0.go
@@ -0,0 +1,1351 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.16
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "13.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6.
+type widthTrie struct{}
+
+func newWidthTrie(i int) *widthTrie {
+	return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	default:
+		return uint16(widthValues[n<<6+uint32(b)])
+	}
+}
+
+// widthValues: 105 blocks, 6720 entries, 13440 bytes
+// The third block is the zero block.
+var widthValues = [6720]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, + 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, + // Block 0x3e, offset 0xf80 + 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, + 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, + 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, + 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, + 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, + 0xfbc: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, + 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, + 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, + 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, + 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, + 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, + 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, + 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, + 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, + 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, + 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, + 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, + 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, + 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, + 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, + 0x106a: 0x4000, 0x106b: 0x4000, + // Block 0x42, offset 0x1080 + 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, + 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, + 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, + 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, + 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, + 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, + 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, + 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, + 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, + 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, + 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, + 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, + 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, + 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, + 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, + 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, + 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, + 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, + 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, + 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, + 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, + // Block 0x44, offset 0x1100 + 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, + 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, + 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, + 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, + 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, + 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, + 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, + 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, + 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, + 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, + 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, + // Block 0x45, offset 0x1140 + 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, + 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, + 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, + 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, + 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, + 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, + 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, + 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, + 0x117d: 0x2000, + // Block 0x46, offset 0x1180 + 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, + 0x11a4: 0x4000, + 0x11b0: 0x4000, 0x11b1: 0x4000, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, + 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, + 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, + 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, + 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, + 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, + 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, + 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, + 0x11f6: 0x4000, 0x11f7: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, + // Block 0x4a, offset 0x1280 
+ 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bf: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, + 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, + 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, + 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, + 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, + // Block 0x5c, offset 0x1700 + 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, + 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, + 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, + 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, + 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, + 0x173a: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1755: 0x4000, 0x1756: 0x4000, + 0x1764: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x182b: 0x4000, 0x182c: 0x4000, + 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, + // Block 0x61, offset 0x1840 + 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, + 0x1986: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, + 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, + 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, + 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, + 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, + 0x19b6: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, + 0x19d0: 0x4000, 0x19d1: 0x4000, + 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, + 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, + 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, + 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, + 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, + 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, + 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, + 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, + 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, + 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, + 0x1a3c: 0x2000, 0x1a3d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, + 0x265: 0x3c, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, + // Block 0xd, offset 0x340 + 0x37f: 0x44, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, + 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, + 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, + 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
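A minimal standalone sketch of the xor scheme described in the comment above (the decodeInverse helper, the package main wrapper, and the worked input are assumptions for illustration only; they are not part of the vendored table file, which consumes these entries internally):

package main

import "fmt"

// decodeInverse applies one inverseData-style entry to the UTF-8 bytes of a
// wide/narrow variant rune. entry[0] holds the length of the mapped UTF-8
// sequence; the following bytes are that sequence with its last byte xor-ed,
// so xor-ing again with the last byte of the input restores the original rune.
func decodeInverse(entry [4]byte, variant []byte) []byte {
	n := int(entry[0])
	out := make([]byte, n)
	copy(out, entry[1:1+n])
	out[n-1] ^= variant[len(variant)-1] // undo the xor using the input's last byte
	return out
}

func main() {
	// Example from the comment: entry {0x01, 0xE0, 0x00, 0x00} applied to
	// U+FF21 (UTF-8 EF BC A1) gives 0xE0 ^ 0xA1 = 0x41, i.e. "A".
	fmt.Printf("%s\n", decodeInverse([4]byte{0x01, 0xE0, 0x00, 0x00}, []byte{0xEF, 0xBC, 0xA1}))
}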
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15448 bytes (15KiB) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index f516e17623d6b..f65aad4ec9f52 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -13,6 +13,7 @@ import ( "os" "os/exec" "regexp" + "strconv" "strings" "sync" "time" @@ -130,6 +131,12 @@ type Invocation struct { Verb string Args []string BuildFlags []string + ModFlag string + ModFile string + Overlay string + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool Env []string WorkingDir string Logf func(format string, args ...interface{}) @@ -158,17 +165,41 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { } goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) case "mod": - // mod needs the sub-verb before build flags. + // mod needs the sub-verb before flags. goArgs = append(goArgs, i.Args[0]) - goArgs = append(goArgs, i.BuildFlags...) + appendModFile() goArgs = append(goArgs, i.Args[1:]...) - case "env": - // env doesn't take build flags. + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() goArgs = append(goArgs, i.Args...) - default: + + default: // notably list and build. goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() goArgs = append(goArgs, i.Args...) } cmd := exec.Command("go", goArgs...) @@ -180,7 +211,10 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { // The Go stdlib has a special feature where if the cwd and the PWD are the // same node then it trusts the PWD, so by setting it in the env for the child // process we fix up all the paths returned by the go command. - cmd.Env = append(os.Environ(), i.Env...) + if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) 
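	// Illustrative note on the new fields above: in run() the default case
	// (notably "list" and "build") turns ModFile into -modfile=, ModFlag into
	// -mod=, and Overlay into -overlay=, while CleanEnv decides whether
	// os.Environ() seeds cmd.Env at all. A hypothetical caller inside x/tools
	// could therefore pin module behaviour per invocation along these lines
	// (the paths and values below are made up for illustration):
	//
	//	inv := gocommand.Invocation{
	//		Verb:     "list",
	//		Args:     []string{"-m", "all"},
	//		ModFlag:  "readonly",            // appended as -mod=readonly
	//		ModFile:  "/tmp/scratch.go.mod", // appended as -modfile=...
	//		CleanEnv: true,                  // ignore os.Environ(), use only Env
	//		Env:      []string{"GO111MODULE=on"},
	//	}
	//	// out, err := runner.Run(ctx, inv)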
if i.WorkingDir != "" { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir @@ -221,10 +255,19 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { func cmdDebugStr(cmd *exec.Cmd) string { env := make(map[string]string) for _, kv := range cmd.Env { - split := strings.Split(kv, "=") + split := strings.SplitN(kv, "=", 2) k, v := split[0], split[1] env[k] = v } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 0000000000000..60d45ac0e64d0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "strings" +) + +// GoVersion checks the go version by running "go list" with modules off. +// It returns the X in Go 1.X. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`} + inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") + // Unset any unneeded flags. + inv.ModFile = "" + inv.ModFlag = "" + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 675d16c873b4b..d859617b77452 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -83,7 +83,7 @@ type ImportFix struct { IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). FixType ImportFixType - Relevance int // see pkg + Relevance float64 // see pkg } // An ImportInfo represents a single import statement. @@ -592,9 +592,9 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// Highest relevance, used for the standard library. Chosen arbitrarily to -// match pre-existing gopls code. -const MaxRelevance = 7 +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 // getCandidatePkgs works with the passed callback to find all acceptable packages. 
// It deduplicates by import path, and uses a cached stdlib rather than reading @@ -607,6 +607,10 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena if err != nil { return err } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + // Start off with the standard library. for importPath, exports := range stdlib { p := &pkg{ @@ -615,14 +619,12 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena packageName: path.Base(importPath), relevance: MaxRelevance, } + dupCheck[importPath] = struct{}{} if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { wrappedCallback.exportsLoaded(p, exports) } } - var mu sync.Mutex - dupCheck := map[string]struct{}{} - scanFilter := &scanCallback{ rootFound: func(root gopathwalk.Root) bool { // Exclude goroot results -- getting them is relatively expensive, not cached, @@ -658,8 +660,8 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena return resolver.scan(ctx, scanFilter) } -func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]int, error) { - result := make(map[string]int) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) resolver, err := env.GetResolver() if err != nil { return nil, err @@ -802,6 +804,8 @@ type ProcessEnv struct { GocmdRunner *gocommand.Runner BuildFlags []string + ModFlag string + ModFile string // Env overrides the OS environment, and can be used to specify // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because @@ -995,7 +999,7 @@ type Resolver interface { // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) // scoreImportPath returns the relevance for an import path. - scoreImportPath(ctx context.Context, path string) int + scoreImportPath(ctx context.Context, path string) float64 ClearForNewScan() } @@ -1260,10 +1264,10 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") - importPathShort string // vendorless import path ("net/http", "a/b") - packageName string // package name loaded from source if requested - relevance int // a weakly-defined score of how relevant a package is. 0 is most relevant. + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. 
} type pkgDistance struct { @@ -1389,7 +1393,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error return nil } -func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { if _, ok := stdlib[path]; ok { return MaxRelevance } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 94880d616041a..73f7a49587972 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -59,6 +59,8 @@ func (r *ModuleResolver) init() error { } inv := gocommand.Invocation{ BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + ModFile: r.env.ModFile, Env: r.env.env(), Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, @@ -345,10 +347,11 @@ func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { } if r.dirInModuleCache(dir) { - matches := modCacheRegexp.FindStringSubmatch(dir) - index := strings.Index(dir, matches[1]+"@"+matches[2]) - modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) - return modDir, readModName(filepath.Join(modDir, "go.mod")) + if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } } for { if info, ok := r.cacheLoad(dir); ok { @@ -487,7 +490,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return nil } -func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { if _, ok := stdlib[path]; ok { return MaxRelevance } @@ -495,17 +498,31 @@ func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { return modRelevance(mod) } -func modRelevance(mod *gocommand.ModuleJSON) int { +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 switch { case mod == nil: // out of scope return MaxRelevance - 4 case mod.Indirect: - return MaxRelevance - 3 + relevance = MaxRelevance - 3 case !mod.Main: - return MaxRelevance - 2 + relevance = MaxRelevance - 2 default: - return MaxRelevance - 1 // main module ties with stdlib + relevance = MaxRelevance - 1 // main module ties with stdlib + } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } } + + return relevance } // canonicalize gets the result of canonicalizing the packages using the results diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index 4a61a71d47bd3..fecc65193c7ee 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -778,7 +778,7 @@ ] }, "delete": { - "description": "Marks the Project identified by the specified `project_id` (for example, `my-project-123`) for deletion. This method will only affect the Project if it has a lifecycle state of ACTIVE. 
This method changes the Project's lifecycle state from ACTIVE to DELETE_REQUESTED. The deletion starts at an unspecified time, at which point the Project is no longer accessible. Until the deletion completes, you can check the lifecycle state checked by retrieving the Project with GetProject, and the Project remains visible to ListProjects. However, you cannot update the project. After the deletion completes, the Project is not retrievable by the GetProject and ListProjects methods. The caller must have modify permissions for this Project.", + "description": "Marks the Project identified by the specified `project_id` (for example, `my-project-123`) for deletion. This method will only affect the Project if it has a lifecycle state of ACTIVE. This method changes the Project's lifecycle state from ACTIVE to DELETE_REQUESTED. The deletion starts at an unspecified time, at which point the Project is no longer accessible. Until the deletion completes, you can check the lifecycle state checked by retrieving the Project with GetProject, and the Project remains visible to ListProjects. However, you cannot update the project. After the deletion completes, the Project is not retrievable by the GetProject and ListProjects methods. The caller must have delete permissions for this Project.", "flatPath": "v1/projects/{projectId}", "httpMethod": "DELETE", "id": "cloudresourcemanager.projects.delete", @@ -1115,7 +1115,7 @@ ] }, "undelete": { - "description": "Restores the Project identified by the specified `project_id` (for example, `my-project-123`). You can only use this method for a Project that has a lifecycle state of DELETE_REQUESTED. After deletion starts, the Project cannot be restored. The caller must have modify permissions for this Project.", + "description": "Restores the Project identified by the specified `project_id` (for example, `my-project-123`). You can only use this method for a Project that has a lifecycle state of DELETE_REQUESTED. After deletion starts, the Project cannot be restored. The caller must have undelete permissions for this Project.", "flatPath": "v1/projects/{projectId}:undelete", "httpMethod": "POST", "id": "cloudresourcemanager.projects.undelete", @@ -1171,7 +1171,7 @@ } } }, - "revision": "20200907", + "revision": "20201027", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1238,7 +1238,6 @@ "id": "Binding", "properties": { "bindingId": { - "description": "A client-specified ID for this binding. Expected to be globally unique to support the internal bindings-by-ID API.", "type": "string" }, "condition": { @@ -1292,6 +1291,72 @@ }, "type": "object" }, + "CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation": { + "description": "Metadata describing a long running folder operation", + "id": "CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation", + "properties": { + "destinationParent": { + "description": "The resource name of the folder or organization we are either creating the folder under or moving the folder to.", + "type": "string" + }, + "displayName": { + "description": "The display name of the folder.", + "type": "string" + }, + "operationType": { + "description": "The type of this operation.", + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "MOVE" + ], + "enumDescriptions": [ + "Operation type not specified.", + "A create folder operation.", + "A move folder operation." + ], + "type": "string" + }, + "sourceParent": { + "description": "The resource name of the folder's parent. 
Only applicable when the operation_type is MOVE.", + "type": "string" + } + }, + "type": "object" + }, + "CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation": { + "description": "Metadata describing a long running folder operation", + "id": "CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation", + "properties": { + "destinationParent": { + "description": "The resource name of the folder or organization we are either creating the folder under or moving the folder to.", + "type": "string" + }, + "displayName": { + "description": "The display name of the folder.", + "type": "string" + }, + "operationType": { + "description": "The type of this operation.", + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "MOVE" + ], + "enumDescriptions": [ + "Operation type not specified.", + "A create folder operation.", + "A move folder operation." + ], + "type": "string" + }, + "sourceParent": { + "description": "The resource name of the folder's parent. Only applicable when the operation_type is MOVE.", + "type": "string" + } + }, + "type": "object" + }, "Constraint": { "description": "A `Constraint` describes a way in which a resource's configuration can be restricted. For example, it controls which cloud services can be activated across an organization, or whether a Compute Engine instance can have serial port connections established. `Constraints` can be configured by the organization's policy administrator to fit the needs of the organzation by setting Policies for `Constraints` at different locations in the organization's resource hierarchy. Policies are inherited down the resource hierarchy from higher levels, but can also be overridden. For details about the inheritance rules please read about [Policies](/resource-manager/reference/rest/v1/Policy). `Constraints` have a default behavior determined by the `constraint_default` field, which is the enforcement behavior that is used in the absence of a `Policy` being defined or inherited for the resource in question.", "id": "Constraint", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 1cbe27dbf7da7..0ad5f59b45f39 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -327,8 +327,6 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - // BindingId: A client-specified ID for this binding. Expected to be - // globally unique to support the internal bindings-by-ID API. BindingId string `json:"bindingId,omitempty"` // Condition: The condition that is associated with this binding. If the @@ -497,6 +495,98 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation: +// Metadata describing a long running folder operation +type CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation struct { + // DestinationParent: The resource name of the folder or organization we + // are either creating the folder under or moving the folder to. + DestinationParent string `json:"destinationParent,omitempty"` + + // DisplayName: The display name of the folder. 
+ DisplayName string `json:"displayName,omitempty"` + + // OperationType: The type of this operation. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Operation type not specified. + // "CREATE" - A create folder operation. + // "MOVE" - A move folder operation. + OperationType string `json:"operationType,omitempty"` + + // SourceParent: The resource name of the folder's parent. Only + // applicable when the operation_type is MOVE. + SourceParent string `json:"sourceParent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationParent") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationParent") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation) MarshalJSON() ([]byte, error) { + type NoMethod CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation: +// Metadata describing a long running folder operation +type CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation struct { + // DestinationParent: The resource name of the folder or organization we + // are either creating the folder under or moving the folder to. + DestinationParent string `json:"destinationParent,omitempty"` + + // DisplayName: The display name of the folder. + DisplayName string `json:"displayName,omitempty"` + + // OperationType: The type of this operation. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Operation type not specified. + // "CREATE" - A create folder operation. + // "MOVE" - A move folder operation. + OperationType string `json:"operationType,omitempty"` + + // SourceParent: The resource name of the folder's parent. Only + // applicable when the operation_type is MOVE. + SourceParent string `json:"sourceParent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationParent") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationParent") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation) MarshalJSON() ([]byte, error) { + type NoMethod CloudresourcemanagerGoogleCloudResourcemanagerV2beta1FolderOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Constraint: A `Constraint` describes a way in which a resource's // configuration can be restricted. For example, it controls which cloud // services can be activated across an organization, or whether a @@ -2181,7 +2271,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2325,7 +2415,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2470,7 +2560,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2612,7 +2702,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2777,7 +2867,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2942,7 +3032,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3084,7 +3174,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := 
range c.header_ { reqHeaders[k] = v } @@ -3213,7 +3303,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3359,7 +3449,7 @@ func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3530,7 +3620,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3702,7 +3792,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3838,7 +3928,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3988,7 +4078,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4128,7 +4218,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4274,7 +4364,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4419,7 +4509,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4561,7 +4651,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4726,7 +4816,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4890,7 +4980,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5044,7 +5134,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5187,7 +5277,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5330,7 +5420,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5471,7 +5561,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c *ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5620,7 +5710,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5721,7 +5811,7 @@ type ProjectsDeleteCall struct { // the Project with GetProject, and the Project remains visible to // ListProjects. However, you cannot update the project. After the // deletion completes, the Project is not retrievable by the GetProject -// and ListProjects methods. The caller must have modify permissions for +// and ListProjects methods. The caller must have delete permissions for // this Project. func (r *ProjectsService) Delete(projectId string) *ProjectsDeleteCall { c := &ProjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5756,7 +5846,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5815,7 +5905,7 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } return ret, nil // { - // "description": "Marks the Project identified by the specified `project_id` (for example, `my-project-123`) for deletion. This method will only affect the Project if it has a lifecycle state of ACTIVE. This method changes the Project's lifecycle state from ACTIVE to DELETE_REQUESTED. The deletion starts at an unspecified time, at which point the Project is no longer accessible. Until the deletion completes, you can check the lifecycle state checked by retrieving the Project with GetProject, and the Project remains visible to ListProjects. However, you cannot update the project. After the deletion completes, the Project is not retrievable by the GetProject and ListProjects methods. The caller must have modify permissions for this Project.", + // "description": "Marks the Project identified by the specified `project_id` (for example, `my-project-123`) for deletion. This method will only affect the Project if it has a lifecycle state of ACTIVE. This method changes the Project's lifecycle state from ACTIVE to DELETE_REQUESTED. The deletion starts at an unspecified time, at which point the Project is no longer accessible. Until the deletion completes, you can check the lifecycle state checked by retrieving the Project with GetProject, and the Project remains visible to ListProjects. However, you cannot update the project. After the deletion completes, the Project is not retrievable by the GetProject and ListProjects methods. 
The caller must have delete permissions for this Project.", // "flatPath": "v1/projects/{projectId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.projects.delete", @@ -5898,7 +5988,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6036,7 +6126,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6180,7 +6270,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6327,7 +6417,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6471,7 +6561,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6679,7 +6769,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6841,7 +6931,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7006,7 +7096,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ 
{ reqHeaders[k] = v } @@ -7204,7 +7294,7 @@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7346,7 +7436,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7490,7 +7580,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7599,7 +7689,7 @@ type ProjectsUndeleteCall struct { // `project_id` (for example, `my-project-123`). You can only use this // method for a Project that has a lifecycle state of DELETE_REQUESTED. // After deletion starts, the Project cannot be restored. The caller -// must have modify permissions for this Project. +// must have undelete permissions for this Project. func (r *ProjectsService) Undelete(projectId string, undeleteprojectrequest *UndeleteProjectRequest) *ProjectsUndeleteCall { c := &ProjectsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -7634,7 +7724,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7698,7 +7788,7 @@ func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) } return ret, nil // { - // "description": "Restores the Project identified by the specified `project_id` (for example, `my-project-123`). You can only use this method for a Project that has a lifecycle state of DELETE_REQUESTED. After deletion starts, the Project cannot be restored. The caller must have modify permissions for this Project.", + // "description": "Restores the Project identified by the specified `project_id` (for example, `my-project-123`). You can only use this method for a Project that has a lifecycle state of DELETE_REQUESTED. After deletion starts, the Project cannot be restored. 
The caller must have undelete permissions for this Project.", // "flatPath": "v1/projects/{projectId}:undelete", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.undelete", @@ -7775,7 +7865,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 89d5ee3cb8f41..dcca968c0d28d 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"-2NioU2H8y8siEzrBOV_qzRI6kQ/9fTQCPIy0iKPsZjvvoyzwyC1hLs\"", + "etag": "\"-2NioU2H8y8siEzrBOV_qzRI6kQ/AubvcHOOmrHBNbxRjSZ8CR3OwJo\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -130,6 +130,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/acceleratorTypes", @@ -223,6 +228,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -287,6 +297,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/addresses", @@ -473,6 +488,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/addresses", @@ -531,6 +551,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/autoscalers", @@ -711,6 +736,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for this request.", "location": "path", @@ -1053,6 +1083,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/backendBuckets", @@ -1233,6 +1268,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/backendServices", @@ -1467,6 +1507,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/backendServices", @@ -1647,6 +1692,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/diskTypes", @@ -1740,6 +1790,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -1853,6 +1908,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/disks", @@ -1883,7 +1943,7 @@ "type": "string" }, "guestFlush": { - "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", "location": "query", "type": "boolean" }, @@ -2139,6 +2199,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -2539,6 +2604,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/externalVpnGateways", @@ -2770,6 +2840,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/firewalls", @@ -2910,6 +2985,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/forwardingRules", @@ -3096,6 +3176,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/forwardingRules", @@ -3352,6 +3437,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/addresses", @@ -3510,6 +3600,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/forwardingRules", @@ -3828,6 +3923,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networkEndpointGroups", @@ -3884,6 +3984,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", @@ -3942,6 +4047,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/operations", @@ -4055,6 +4165,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/operations", @@ -4147,6 +4262,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/healthChecks", @@ -4301,6 +4421,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/healthChecks", @@ -4541,6 +4666,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/httpHealthChecks", @@ -4781,6 +4911,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/httpsHealthChecks", @@ -5144,6 +5279,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/images", @@ -5156,6 +5296,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified image with the data included in the request. Only the following fields can be modified: family, description, deprecation status.", + "httpMethod": "PATCH", + "id": "compute.images.patch", + "parameterOrder": [ + "project", + "image" + ], + "parameters": { + "image": { + "description": "Name of the image resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/global/images/{image}", + "request": { + "$ref": "Image" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "httpMethod": "POST", @@ -5358,6 +5539,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/instanceGroupManagers", @@ -5711,6 +5897,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -5774,6 +5965,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", @@ -5837,6 +6033,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located.", "location": "path", @@ -5900,6 +6101,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", "location": "path", @@ -6344,6 +6550,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/instanceGroups", @@ -6401,7 +6612,7 @@ ] }, "get": { - "description": "Returns the specified instance group. 
Gets a list of available instance groups by making a list() request.", + "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", "httpMethod": "GET", "id": "compute.instanceGroups.get", "parameterOrder": [ @@ -6481,7 +6692,7 @@ ] }, "list": { - "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "description": "Retrieves the list of zonal instance group resources contained within the specified zone.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", "httpMethod": "GET", "id": "compute.instanceGroups.list", "parameterOrder": [ @@ -6519,6 +6730,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the instance group is located.", "location": "path", @@ -6582,6 +6798,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the instance group is located.", "location": "path", @@ -6882,6 +7103,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/instanceTemplates", @@ -7118,6 +7344,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/instances", @@ -7141,7 +7372,7 @@ ], "parameters": { "forceAttach": { - "description": "Whether to force attach the regional disk even if it's currently attached to another instance.", + "description": "Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", "location": "query", "type": "boolean" }, @@ -7711,6 +7942,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -7730,7 +7966,7 @@ ] }, "listReferrers": { - "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed instance group, the referrers list includes the managed instance group. For more information, read Viewing Referrers to VM Instances.", + "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", "httpMethod": "GET", "id": "compute.instances.listReferrers", "parameterOrder": [ @@ -7776,6 +8012,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -9053,6 +9294,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/interconnectAttachments", @@ -9244,6 +9490,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/interconnectAttachments", @@ -9380,6 +9631,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/interconnectLocations", @@ -9572,6 +9828,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/interconnects", @@ -9889,6 +10150,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/licenses", @@ -10020,6 +10286,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/machineTypes", @@ -10113,6 +10384,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -10177,6 +10453,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/networkEndpointGroups", @@ -10446,6 +10727,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "location": "path", @@ -10509,6 +10795,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "location": "path", @@ -10761,6 +11052,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networks", @@ -10841,6 +11137,11 @@ "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", "location": "query", "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/networks/{network}/listPeeringRoutes", @@ -11109,6 +11410,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/nodeGroups", @@ -11394,6 +11700,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11459,6 +11770,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11710,6 +12026,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/nodeTemplates", @@ -11944,6 +12265,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/nodeTemplates", @@ -12091,6 +12417,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/nodeTypes", @@ -12184,6 +12515,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -12248,6 +12584,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/packetMirrorings", @@ -12434,6 +12775,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/packetMirrorings", @@ -12758,6 +13104,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/getXpnResources", @@ -12806,6 +13157,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/listXpnHosts", @@ -13166,6 +13522,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/autoscalers", @@ -13494,6 +13855,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/backendServices", @@ -13650,6 +14016,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/commitments", @@ -13790,6 +14161,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/commitments", @@ -13893,6 +14269,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/diskTypes", @@ -14233,6 +14614,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/disks", @@ -14657,6 +15043,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/healthCheckServices", @@ -14895,6 +15286,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/healthChecks", @@ -15402,6 +15798,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers", @@ -15465,6 +15866,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", @@ -15528,6 +15934,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", @@ -15591,6 +16002,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", @@ -16026,6 +16442,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroups", @@ -16089,6 +16510,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", @@ -16323,6 +16749,11 @@ "location": "path", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/networkEndpointGroups", @@ -16513,6 +16944,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/notificationEndpoints", @@ -16654,6 +17090,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/operations", @@ -16886,6 +17327,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/sslCertificates", @@ -17076,6 +17522,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetHttpProxies", @@ -17315,6 +17766,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetHttpsProxies", @@ -17603,6 +18059,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/urlMaps", @@ -17832,6 +18293,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions", @@ -17890,6 +18356,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/reservations", @@ -18118,6 +18589,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for this request.", "location": "path", @@ -18320,6 +18796,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/resourcePolicies", @@ -18554,6 +19035,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/resourcePolicies", @@ -18701,6 +19187,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/routers", @@ -18848,6 +19339,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "router": { "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", "location": "path", @@ -18994,6 +19490,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/routers", @@ -19295,6 +19796,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/routes", @@ -19529,6 +20035,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/securityPolicies", @@ -19578,6 +20089,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", @@ -19864,6 +20380,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/snapshots", @@ -20031,6 +20552,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/sslCertificates", @@ -20185,6 +20711,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/sslCertificates", @@ -20341,6 +20872,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/sslPolicies", @@ -20390,6 +20926,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/sslPolicies/listAvailableFeatures", @@ -20488,6 +21029,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/subnetworks", @@ -20771,6 +21317,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/subnetworks", @@ -20820,6 +21371,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/subnetworks/listUsable", @@ -21171,6 +21727,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetGrpcProxies", @@ -21270,6 +21831,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetHttpProxies", @@ -21424,6 +21990,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetHttpProxies", @@ -21436,6 +22007,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + "httpMethod": "PATCH", + "id": "compute.targetHttpProxies.patch", + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setUrlMap": { "description": "Changes the URL map for TargetHttpProxy.", "httpMethod": "POST", @@ -21523,6 +22135,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetHttpsProxies", @@ -21677,6 +22294,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetHttpsProxies", @@ -21897,6 +22519,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetInstances", @@ -22077,6 +22704,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone scoping this request.", "location": "path", @@ -22239,6 +22871,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetPools", @@ -22470,6 +23107,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetPools", @@ -22781,6 +23423,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetSslProxies", @@ -23102,6 +23749,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/targetTcpProxies", @@ -23242,6 +23894,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/targetVpnGateways", @@ -23428,6 +24085,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/targetVpnGateways", @@ -23486,6 +24148,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/urlMaps", @@ -23604,7 +24271,7 @@ ] }, "invalidateCache": { - "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.\n\nFor more information, see [Invalidating cached content](/cdn/docs/invalidating-cached-content).", "httpMethod": "POST", "id": "compute.urlMaps.invalidateCache", "parameterOrder": [ @@ -23681,6 +24348,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/urlMaps", @@ -23857,6 +24529,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/vpnGateways", @@ -24085,6 +24762,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/vpnGateways", @@ -24237,6 +24919,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/aggregated/vpnTunnels", @@ -24423,6 +25110,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/regions/{region}/vpnTunnels", @@ -24558,6 +25250,11 @@ "required": true, "type": "string" }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" + }, "zone": { "description": "Name of the zone for request.", "location": "path", @@ -24693,6 +25390,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + "location": "query", + "type": "boolean" } }, "path": "{project}/zones", @@ -24708,7 +25410,7 @@ } } }, - "revision": "20200811", + "revision": "20201007", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -24802,6 +25504,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -24816,6 +25525,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -24824,6 +25534,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24855,6 +25566,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -24928,6 +25641,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -24936,6 +25650,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -24967,6 +25682,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -25022,6 +25739,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -25030,6 +25748,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25061,6 +25780,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -25229,7 +25950,7 @@ "type": "integer" }, "purpose": { - "description": "The purpose of this resource, which can be one of the following values: \n- `GCE_ENDPOINT` for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. \n- `DNS_RESOLVER` for a DNS resolver address in a subnetwork \n- `VPC_PEERING` for addresses that are reserved for VPC peer networks. \n- `NAT_AUTO` for addresses that are external IP addresses automatically reserved for Cloud NAT.", + "description": "The purpose of this resource, which can be one of the following values: \n- `GCE_ENDPOINT` for addresses that are used by VM instances, alias IP ranges, internal load balancers, and similar resources. \n- `DNS_RESOLVER` for a DNS resolver address in a subnetwork \n- `VPC_PEERING` for addresses that are reserved for VPC peer networks. \n- `NAT_AUTO` for addresses that are external IP addresses automatically reserved for Cloud NAT. \n- `IPSEC_INTERCONNECT` for addresses created from a private IP range that are reserved for a VLAN attachment in an IPsec encrypted Interconnect configuration. 
These addresses are regional resources.", "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", @@ -25310,6 +26031,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -25324,6 +26052,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -25332,6 +26061,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25363,6 +26093,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -25436,6 +26168,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -25444,6 +26177,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25475,6 +26209,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -25530,6 +26266,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -25538,6 +26275,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -25569,6 +26307,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -25660,18 +26400,6 @@ "description": "Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.", "type": "string" }, - "maintenanceInterval": { - "description": "Specifies whether this VM may be a stable fleet VM. 
Setting this to \"Periodic\" designates this VM as a Stable Fleet VM.\n\nSee go/stable-fleet-ug for more details.", - "enum": [ - "AS_NEEDED", - "PERIODIC" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, "minCpuPlatform": { "description": "Minimum cpu platform the reservation.", "type": "string" @@ -26063,6 +26791,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -26077,6 +26812,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -26085,6 +26821,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26116,6 +26853,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -26189,6 +26928,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -26197,6 +26937,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26228,6 +26969,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -26337,6 +27080,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -26345,6 +27089,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26376,6 +27121,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -26479,12 +27226,21 @@ "description": "Custom utilization metric policy.", "id": "AutoscalingPolicyCustomMetricUtilization", "properties": { + "filter": { + "description": "A filter string, compatible with a Stackdriver Monitoring filter string for TimeSeries.list API call. This filter is used to select a specific TimeSeries for the purpose of autoscaling and to determine whether the metric is exporting per-instance or per-group data.\n\nFor the filter to be valid for autoscaling purposes, the following rules apply: \n- You can only use the AND operator for joining selectors. \n- You can only use direct equality comparison operator (=) without any functions for each selector. \n- You can specify the metric in both the filter string and in the metric field. However, if specified in both places, the metric must be identical. \n- The monitored resource type determines what kind of values are expected for the metric. If it is a gce_instance, the autoscaler expects the metric to include a separate TimeSeries for each instance in a group. In such a case, you cannot filter on resource labels.\nIf the resource type is any other value, the autoscaler expects this metric to contain values that apply to the entire autoscaled instance group and resource label filtering can be performed to point autoscaler at the correct TimeSeries to scale upon. 
This is called a per-group metric for the purpose of autoscaling.\n\nIf not specified, the type defaults to gce_instance. \n\nYou should provide a filter that is selective enough to pick just one TimeSeries for the autoscaled group or for each of the instances (if you are using gce_instance resource type). If multiple TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value.", + "type": "string" + }, "metric": { "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE.", "type": "string" }, + "singleInstanceAssignment": { + "description": "If scaling is based on a per-group metric value that represents the total amount of work to be done or resource usage, set this value to an amount assigned for a single instance of the scaled group. Autoscaler will keep the number of instances proportional to the value of this metric, the metric itself should not change value due to group resizing.\n\nA good metric to use with the target is for example pubsub.googleapis.com/subscription/num_undelivered_messages or a custom metric exporting the total number of requests coming to your instances.\n\nA bad example would be a metric exporting an average or median latency, since this value can't include a chunk assignable to a single instance, it could be better used with utilization_target instead.", + "format": "double", + "type": "number" + }, "utilizationTarget": { - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", "format": "double", "type": "number" }, @@ -26713,6 +27469,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -26721,6 +27478,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -26752,6 +27510,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -26788,7 +27548,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. 
Only applicable if the loadBalancingScheme is EXTERNAL, INTERNAL_SELF_MANAGED, or INTERNAL_MANAGED, the protocol is HTTP or HTTPS, and the sessionAffinity is GENERATED_COOKIE, or HTTP_COOKIE.\n\nIf set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is one day (86,400).\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -26959,6 +27719,10 @@ "description": "[Output Only] The resource URL for the security policy associated with this backend service.", "type": "string" }, + "securitySettings": { + "$ref": "SecuritySettings", + "description": "This field specifies the security policy that applies to this backend service. This field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED." + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -27022,6 +27786,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -27036,6 +27807,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -27044,6 +27816,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27075,6 +27848,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -27239,6 +28014,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -27247,6 +28023,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27278,6 +28055,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -27358,6 +28137,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -27366,6 +28146,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27397,6 +28178,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -27432,6 +28215,10 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { + "bindingId": { + "description": "A client-specified ID for this binding. 
Expected to be globally unique to support the internal bindings-by-ID API.", + "type": "string" + }, "condition": { "$ref": "Expr", "description": "The condition that is associated with this binding.\n\nIf the condition evaluates to `true`, then this binding applies to the current request.\n\nIf the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding.\n\nTo learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." @@ -27668,6 +28455,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -27682,6 +28476,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -27690,6 +28485,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27721,6 +28517,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -27794,6 +28592,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -27802,6 +28601,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27833,6 +28633,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -27888,6 +28690,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -27896,6 +28699,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -27927,6 +28731,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -28038,6 +28844,17 @@ }, "type": "object" }, + "ConfidentialInstanceConfig": { + "description": "A set of Confidential Instance options.", + "id": "ConfidentialInstanceConfig", + "properties": { + "enableConfidentialCompute": { + "description": "Defines whether the instance should have confidential compute enabled.", + "type": "boolean" + } + }, + "type": "object" + }, "ConnectionDraining": { "description": "Message containing connection draining configuration.", "id": "ConnectionDraining", @@ -28429,6 +29246,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -28443,6 +29267,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", 
"NEXT_HOP_CANNOT_IP_FORWARD", @@ -28451,6 +29276,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28482,6 +29308,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -28596,6 +29424,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -28604,6 +29433,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28635,6 +29465,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -28763,6 +29595,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -28777,6 +29616,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -28785,6 +29625,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28816,6 +29657,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -28889,6 +29732,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -28897,6 +29741,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -28928,6 +29773,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -28983,6 +29830,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -28991,6 +29839,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29022,6 +29871,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -29114,6 +29965,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -29122,6 +29974,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29153,6 +30006,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -29318,6 +30173,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -29326,6 +30182,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29357,6 +30214,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -29544,6 +30403,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -29552,6 +30412,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29583,6 +30444,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -29832,6 +30695,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -29840,6 +30704,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -29871,6 +30736,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -29952,7 +30819,7 @@ "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nMust be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true.", + "description": "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule.\n\nIf you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address:\n\n* IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name * Partial URL or by name, as in: * projects/project_id/regions/region/addresses/address-name * regions/region/addresses/address-name * global/addresses/address-name * address-name \n\nThe loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. 
For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nMust be set to `0.0.0.0` when the target is targetGrpcProxy that has validateForProxyless field set to true.\n\nFor Private Service Connect forwarding rules that forward traffic to Google APIs, IP address must be provided.", "type": "string" }, "IPProtocol": { @@ -30059,7 +30926,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing.\n\nFor INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", + "description": "This field is not used for external load balancing.\n\nFor internal load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.\n\nFor Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -30103,11 +30970,11 @@ "type": "string" }, "subnetwork": { - "description": "This field is only used for INTERNAL load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", + "description": "This field is only used for internal load balancing.\n\nFor internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule.\n\nIf the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy and targetGrpcProxy are valid, not targetHttpsProxy.", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For more information, see the \"Target\" column in [Port specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).\n\nFor Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle. Currently, the supported Google API bundles include:\n\n \n- vpc-sc - GCP APIs that support VPC Service Controls. For more information about which APIs support VPC Service Controls, refer to VPC-SC supported products and limitations. \n- all-apis - All GCP APIs. 
For more information about which APIs are supported with this bundle, refer to Private Google Access-specific domains and VIPs.", "type": "string" } }, @@ -30141,6 +31008,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -30155,6 +31029,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -30163,6 +31038,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30194,6 +31070,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -30267,6 +31145,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -30275,6 +31154,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30306,6 +31186,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -30370,6 +31252,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -30378,6 +31261,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30409,6 +31293,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -30792,7 +31678,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers must use regional health checks. Internal TCP/UDP load balancers can use either regional or global health checks. All other types of GCP load balancers and managed instance group auto-healing must use global health checks. 
For more information, read Health Check Concepts.\n\nTo perform health checks on network load balancers, you must use either httpHealthChecks or httpsHealthChecks.", + "description": "Represents a Health Check resource.\n\nGoogle Compute Engine has two Health Check resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/healthChecks) * [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChecks)\n\nInternal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`).\n\nTraffic Director must use global health checks (`compute.v1.HealthChecks`).\n\nInternal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`).\n\nExternal HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`).\n\nNetwork load balancers must use legacy HTTP health checks (httpHealthChecks).\n\nFor more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -30835,6 +31721,10 @@ "description": "Type of the resource.", "type": "string" }, + "logConfig": { + "$ref": "HealthCheckLogConfig", + "description": "Configure logging on this health check." + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -30931,6 +31821,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -30939,6 +31830,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -30970,6 +31862,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -31001,6 +31895,17 @@ }, "type": "object" }, + "HealthCheckLogConfig": { + "description": "Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver.", + "id": "HealthCheckLogConfig", + "properties": { + "enable": { + "description": "Indicates whether or not to export logs. This is false by default, which means no health check logging will be done.", + "type": "boolean" + } + }, + "type": "object" + }, "HealthCheckReference": { "description": "A full or valid partial URL to a health check. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", "id": "HealthCheckReference", @@ -31138,6 +32043,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -31146,6 +32052,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31177,6 +32084,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -31236,6 +32145,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -31250,6 +32166,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -31258,6 +32175,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31289,6 +32207,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -31344,6 +32264,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -31352,6 +32273,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31383,6 +32305,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -31499,7 +32423,7 @@ "type": "string" }, "hosts": { - "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). In that case, * must be the first character and must be followed in the pattern by either - or ..\n\n* based matching is not supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The list of host patterns to match. They must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). 
In that case, * must be the first character and must be followed in the pattern by either - or ..\n* based matching is not supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "type": "string" }, @@ -31604,7 +32528,7 @@ "type": "string" }, "headerName": { - "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".", + "description": "The name of the HTTP header to match.\nFor matching against the HTTP request's authority, use a headerMatch with the header name \":authority\".\nFor matching a request's method, use the headerName \":method\".\nWhen the URL map is bound to target gRPC proxy that has validateForProxyless field set to true, only non-binary user-specified custom metadata and the `content-type` header are supported. The following transport-level headers cannot be used in header matching rules: `:authority`, `:method`, `:path`, `:scheme`, `user-agent`, `accept-encoding`, `content-encoding`, `grpc-accept-encoding`, `grpc-encoding`, `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and `grpc-trace-bin.", "type": "string" }, "invertMatch": { @@ -31762,6 +32686,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -31770,6 +32695,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -31801,6 +32727,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -31928,30 +32856,30 @@ "properties": { "corsPolicy": { "$ref": "CorsPolicy", - "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing" + "description": "The specification for allowing client side cross-origin requests. Please see W3C Recommendation for Cross Origin Resource Sharing \nNot supported when the URL map is bound to target gRPC proxy." }, "faultInjectionPolicy": { "$ref": "HttpFaultInjection", - "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy." + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted by the Loadbalancer for a percentage of requests.\ntimeout and retry_policy will be ignored by clients that are configured with a fault_injection_policy.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." 
}, "requestMirrorPolicy": { "$ref": "RequestMirrorPolicy", - "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow." + "description": "Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, the host / authority header is suffixed with -shadow.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "retryPolicy": { "$ref": "HttpRetryPolicy", - "description": "Specifies the retry policy associated with this route." + "description": "Specifies the retry policy associated with this route.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "timeout": { "$ref": "Duration", - "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route." + "description": "Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.\nIf not specified, will use the largest timeout among all backend services associated with the route.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "urlRewrite": { "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service.\nurlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers." + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service.\nurlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "weightedBackendServices": { - "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", + "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. 
If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", "items": { "$ref": "WeightedBackendService" }, @@ -31970,7 +32898,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL." + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here are applied before the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "matchRules": { "description": "The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule.", @@ -31994,7 +32922,7 @@ }, "urlRedirect": { "$ref": "HttpRedirectAction", - "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + "description": "When this rule is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." } }, "type": "object" @@ -32015,11 +32943,11 @@ "type": "array" }, "ignoreCase": { - "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.", + "description": "Specifies that prefixMatch and fullPathMatch matches are case sensitive.\nThe default value is false.\nignoreCase must not be used with regexMatch.\nNot supported when the URL map is bound to target gRPC proxy.", "type": "boolean" }, "metadataFilters": { - "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. 
If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS clients present node metadata. When there is a match, the relevant routing configuration is made available to those proxies.\nFor each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. If multiple metadataFilters are specified, all of them need to be satisfied in order to be considered a match.\nmetadataFilters specified here will be applied after those specified in ForwardingRule that refers to the UrlMap this HttpRouteRuleMatch belongs to.\nmetadataFilters only applies to Loadbalancers that have their loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "$ref": "MetadataFilter" }, @@ -32030,7 +32958,7 @@ "type": "string" }, "queryParameterMatches": { - "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.", + "description": "Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.\nNot supported when the URL map is bound to target gRPC proxy.", "items": { "$ref": "HttpQueryParameterMatch" }, @@ -32152,6 +33080,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -32160,6 +33089,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32191,6 +33121,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -32463,6 +33395,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -32471,6 +33404,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32502,6 +33436,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -32573,6 +33509,9 @@ "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding.", "type": "boolean" }, + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig" + }, "cpuPlatform": { "description": "[Output Only] The CPU platform used by this instance.", "type": "string" @@ -32638,6 +33577,18 @@ "description": "Labels to apply to this instance. 
These can be later modified by the setLabels method.", "type": "object" }, + "lastStartTimestamp": { + "description": "[Output Only] Last start timestamp in RFC3339 text format.", + "type": "string" + }, + "lastStopTimestamp": { + "description": "[Output Only] Last stop timestamp in RFC3339 text format.", + "type": "string" + }, + "lastSuspendedTimestamp": { + "description": "[Output Only] Last suspended timestamp in RFC3339 text format.", + "type": "string" + }, "machineType": { "annotations": { "required": [ @@ -32723,7 +33674,7 @@ "type": "boolean" }, "status": { - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see Instance life cycle.", "enum": [ "DEPROVISIONING", "PROVISIONING", @@ -32793,6 +33744,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -32807,6 +33765,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -32815,6 +33774,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -32846,6 +33806,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -32977,6 +33939,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -32991,6 +33960,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -32999,6 +33969,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33030,6 +34001,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -33103,6 +34076,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33111,6 +34085,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33142,6 +34117,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -33382,6 +34359,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] 
Informational warning message.", "properties": { @@ -33396,6 +34380,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33404,6 +34389,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33435,6 +34421,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -33524,6 +34512,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33532,6 +34521,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33563,6 +34553,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -33751,6 +34743,10 @@ "description": "InstanceGroupManagers.applyUpdatesToInstances", "id": "InstanceGroupManagersApplyUpdatesRequest", "properties": { + "allInstances": { + "description": "Flag to update all instances instead of specified list of ?instances?. If the flag is set to true then the instances may not be specified in the request.", + "type": "boolean" + }, "instances": { "description": "The list of URLs of one or more instances for which you want to apply updates. Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { @@ -33896,6 +34892,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -33904,6 +34901,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -33935,6 +34933,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -34017,6 +35017,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34025,6 +35026,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34056,6 +35058,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -34183,6 +35187,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34191,6 +35196,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34222,6 +35228,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -34308,6 +35316,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34316,6 +35325,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34347,6 +35357,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -34438,6 +35450,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34446,6 +35459,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34477,6 +35491,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -34550,6 +35566,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34558,6 +35575,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34589,6 +35607,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -34714,6 +35734,10 @@ "description": "Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", "type": "boolean" }, + "confidentialInstanceConfig": { + "$ref": "ConfidentialInstanceConfig", + "description": "Specifies the Confidential Instance options." + }, "description": { "description": "An optional text description for the instances that are created from these properties.", "type": "string" @@ -34912,6 +35936,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -34920,6 +35945,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -34951,6 +35977,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -35077,6 +36105,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35085,6 +36114,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35116,6 +36146,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -35605,6 +36637,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -35619,6 +36658,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35627,6 +36667,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", 
"NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35658,6 +36699,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -35731,6 +36774,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35739,6 +36783,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35770,6 +36815,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -35856,6 +36903,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -35864,6 +36912,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -35895,6 +36944,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -36114,6 +37165,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -36122,6 +37174,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36153,6 +37206,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -36330,6 +37385,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -36338,6 +37394,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36369,6 +37426,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -36729,6 +37788,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -36737,6 +37797,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -36768,6 +37829,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -37030,6 +38093,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -37044,6 +38114,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37052,6 +38123,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37083,6 +38155,8 @@ "", "", "", + "", + "", "" ], 
"type": "string" @@ -37156,6 +38230,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37164,6 +38239,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37195,6 +38271,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -37250,6 +38328,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37258,6 +38337,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37289,6 +38369,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -37630,6 +38712,11 @@ "description": "[Output Only] Type of the resource. Always compute#network for networks.", "type": "string" }, + "mtu": { + "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes.", + "format": "int32", + "type": "integer" + }, "name": { "annotations": { "required": [ @@ -37820,6 +38907,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -37834,6 +38928,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -37842,6 +38937,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -37873,6 +38969,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -37909,7 +39007,7 @@ "id": "NetworkEndpointGroupAppEngine", "properties": { "service": { - "description": "Optional serving service.\n\nThe service name must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"default\", \"my-service\".", + "description": "Optional serving service.\n\nThe service name is case-sensitive and must be 1-63 characters long.\n\nExample value: \"default\", \"my-service\".", "type": "string" }, "urlMask": { @@ -37917,7 +39015,7 @@ "type": "string" }, "version": { - "description": "Optional serving version.\n\nThe version must be 1-63 characters long, and comply with RFC1035.\n\nExample value: \"v1\", \"v2\".", + "description": "Optional serving version.\n\nThe version name is case-sensitive and must be 1-100 characters long.\n\nExample value: \"v1\", \"v2\".", "type": "string" } }, @@ -37998,6 +39096,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38006,6 +39105,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38037,6 +39137,8 @@ "", "", "", + "", + 
"", "" ], "type": "string" @@ -38149,6 +39251,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38157,6 +39260,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38188,6 +39292,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -38243,6 +39349,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38251,6 +39358,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38282,6 +39390,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -38423,6 +39533,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38431,6 +39542,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38462,6 +39574,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -38529,6 +39643,11 @@ "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network.", "type": "string" }, + "peerMtu": { + "description": "Maximum Transmission Unit in bytes.", + "format": "int32", + "type": "integer" + }, "state": { "description": "[Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The peering is `ACTIVE` when there's a matching configuration in the peer network.", "enum": [ @@ -38726,6 +39845,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -38740,6 +39866,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38748,6 +39875,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38779,6 +39907,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -38814,7 +39944,7 @@ "id": "NodeGroupAutoscalingPolicy", "properties": { "maxNodes": { - "description": "The maximum number of nodes that the group should have.", + "description": "The maximum number of nodes that the group should have. Must be set if autoscaling is enabled. 
Maximum value allowed is 100.", "format": "int32", "type": "integer" }, @@ -38884,6 +40014,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -38892,6 +40023,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -38923,6 +40055,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39079,6 +40213,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39087,6 +40222,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39118,6 +40254,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39173,6 +40311,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39181,6 +40320,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39212,6 +40352,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39371,6 +40513,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -39385,6 +40534,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39393,6 +40543,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39424,6 +40575,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39497,6 +40650,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39505,6 +40659,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39536,6 +40691,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39606,6 +40763,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39614,6 +40772,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39645,6 +40804,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39765,6 +40926,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + 
"unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -39779,6 +40947,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39787,6 +40956,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39818,6 +40988,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39891,6 +41063,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39899,6 +41072,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -39930,6 +41104,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -39985,6 +41161,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -39993,6 +41170,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40024,6 +41202,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -40166,6 +41346,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40174,6 +41355,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40205,6 +41387,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -40377,6 +41561,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40385,6 +41570,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40416,6 +41602,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -40481,6 +41669,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -40495,6 +41690,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40503,6 +41699,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40534,6 +41731,8 @@ "", "", "", 
+ "", + "", "" ], "type": "string" @@ -40607,6 +41806,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40615,6 +41815,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40646,6 +41847,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -40701,6 +41904,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40709,6 +41913,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40740,6 +41945,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -40941,6 +42148,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -40955,6 +42169,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -40963,6 +42178,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -40994,6 +42210,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -41101,6 +42319,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -41109,6 +42328,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41140,6 +42360,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -41264,6 +42486,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -41272,6 +42495,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -41303,6 +42527,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -41340,15 +42566,15 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction." }, "defaultService": { - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use \n\npathMatchers[].defaultService is the only option available when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. 
Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", "type": "string" }, "defaultUrlRedirect": { "$ref": "HttpRedirectAction", - "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy." + "description": "When none of the specified pathRules or routeRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." }, "description": { "description": "An optional description of this resource. Provide this property when you create the resource.", @@ -41356,21 +42582,21 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nHeaderAction specified here are applied after the matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap \nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "name": { "description": "The name to which this PathMatcher is referred by the HostRule.", "type": "string" }, "pathRules": { - "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The list of path rules. Use this list instead of routeRules when routing based on simple path matching is all that's required. The order by which path rules are specified does not matter. 
Matches are always done on the longest-path-first basis.\nFor example: a pathRule with a path /a/b/c/* will match before /a/b/* irrespective of the order in which those paths appear in this list.\nWithin a given pathMatcher, only one of pathRules or routeRules must be set.", "items": { "$ref": "PathRule" }, "type": "array" }, "routeRules": { - "description": "The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number.\nWithin a given pathMatcher, you can set only one of pathRules or routeRules.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The list of HTTP route rules. Use this list instead of pathRules when advanced route matching and routing actions are desired. routeRules are evaluated in order of priority, from the lowest to highest number.\nWithin a given pathMatcher, you can set only one of pathRules or routeRules.", "items": { "$ref": "HttpRouteRule" }, @@ -41400,7 +42626,7 @@ }, "urlRedirect": { "$ref": "HttpRedirectAction", - "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set." + "description": "When a path pattern is matched, the request is redirected to a URL specified by urlRedirect.\nIf urlRedirect is specified, service or routeAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." } }, "type": "object" @@ -42077,6 +43303,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42085,6 +43312,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42116,6 +43344,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -42188,6 +43418,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42196,6 +43427,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42227,6 +43459,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -42337,6 +43571,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42345,6 +43580,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42376,6 +43612,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -42463,6 +43701,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42471,6 +43710,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", 
"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42502,6 +43742,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -42578,6 +43820,10 @@ "description": "RegionInstanceGroupManagers.applyUpdatesToInstances", "id": "RegionInstanceGroupManagersApplyUpdatesRequest", "properties": { + "allInstances": { + "description": "Flag to update all instances instead of specified list of ?instances?. If the flag is set to true then the instances may not be specified in the request.", + "type": "boolean" + }, "instances": { "description": "The list of URLs of one or more instances for which you want to apply updates. Each URL can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { @@ -42692,6 +43938,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42700,6 +43947,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42731,6 +43979,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -42861,6 +44111,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -42869,6 +44120,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -42900,6 +44152,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -43014,6 +44268,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43022,6 +44277,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43053,6 +44309,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -43294,6 +44552,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -43308,6 +44573,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43316,6 +44582,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43347,6 +44614,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -43419,6 +44688,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43427,6 +44697,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43458,6 +44729,8 @@ 
"", "", "", + "", + "", "" ], "type": "string" @@ -43524,6 +44797,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43532,6 +44806,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43563,6 +44838,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -43662,6 +44939,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43670,6 +44948,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43701,6 +44980,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -43745,7 +45026,7 @@ }, "groupPlacementPolicy": { "$ref": "ResourcePolicyGroupPlacementPolicy", - "description": "Resource policy for instacnes for placement configuration." + "description": "Resource policy for instances for placement configuration." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -43829,6 +45110,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -43843,6 +45131,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -43851,6 +45140,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -43882,6 +45172,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -44026,6 +45318,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44034,6 +45327,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44065,6 +45359,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -44161,6 +45457,10 @@ "description": "Specified snapshot properties for scheduled snapshots created by this policy.", "id": "ResourcePolicySnapshotSchedulePolicySnapshotProperties", "properties": { + "chainName": { + "description": "Chain name that the snapshot is created in.", + "type": "string" + }, "guestFlush": { "description": "Indication to perform a 'guest aware' snapshot.", "type": "boolean" @@ -44289,7 +45589,7 @@ "type": "string" }, "nextHopIlb": { - "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets. You can only specify the forwarding rule as a partial or full URL. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule \n- regions/region/forwardingRules/forwardingRule", + "description": "The URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets or the IP address of the forwarding Rule. For example, the following are all valid URLs: \n- 10.128.0.56 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule \n- regions/region/forwardingRules/forwardingRule", "type": "string" }, "nextHopInstance": { @@ -44353,6 +45653,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44361,6 +45662,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44392,6 +45694,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -44467,6 +45771,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44475,6 +45780,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44506,6 +45812,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -44659,6 +45967,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -44673,6 +45988,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44681,6 +45997,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44712,6 +46029,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -44955,6 +46274,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -44963,6 +46283,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -44994,6 +46315,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -45373,6 +46696,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45381,6 +46705,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45412,6 +46737,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -45559,7 +46886,7 @@ "type": "object" }, "Scheduling": { - 
"description": "Sets the scheduling options for an Instance. NextID: 12", + "description": "Sets the scheduling options for an Instance. NextID: 13", "id": "Scheduling", "properties": { "automaticRestart": { @@ -45664,7 +46991,7 @@ "type": "object" }, "SecurityPolicy": { - "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services that use load balancers can reference a Security Policy. For more information, read Cloud Armor Security Policy Concepts. (== resource_for {$api_version}.securityPolicies ==)", + "description": "Represents a Google Cloud Armor security policy resource.\n\nOnly external backend services that use load balancers can reference a security policy. For more information, see Google Cloud Armor security policy overview. (== resource_for {$api_version}.securityPolicies ==)", "id": "SecurityPolicy", "properties": { "creationTimestamp": { @@ -45746,6 +47073,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -45754,6 +47082,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -45785,6 +47114,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -45896,6 +47227,24 @@ }, "type": "object" }, + "SecuritySettings": { + "description": "The authentication and authorization settings for a BackendService.", + "id": "SecuritySettings", + "properties": { + "clientTlsPolicy": { + "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends.\nclientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nIf left blank, communications are not encrypted.\nNote: This field currently has no impact.", + "type": "string" + }, + "subjectAltNames": { + "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service.\nNote that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities.\nOnly applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).\nNote: This field currently has no impact.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "SerialPortOutput": { "description": "An instance's serial console output.", "id": "SerialPortOutput", @@ -46052,6 +47401,10 @@ "description": "[Output Only] Set to true if snapshots are automatically created by applying resource policy on the target disk.", "type": "boolean" }, + "chainName": { + "description": "Creates the new snapshot in the snapshot chain labeled with the specified name. The chain name must be 1-63 characters long and comply with RFC1035. 
This is an uncommon option only for advanced service owners who needs to create separate snapshot chains, for example, for chargeback tracking. When you describe your snapshot resource, this field is visible only if it has a non-empty value.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -46121,7 +47474,7 @@ "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the snapshot later. For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the snapshot.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." }, "sourceDisk": { - "description": "[Output Only] The source disk used to create this snapshot.", + "description": "The source disk used to create this snapshot.", "type": "string" }, "sourceDiskEncryptionKey": { @@ -46219,6 +47572,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46227,6 +47581,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46258,6 +47613,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -46308,7 +47665,7 @@ "id": "SslCertificate", "properties": { "certificate": { - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", + "description": "A value read into memory from a certificate file. The certificate file must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert.", "type": "string" }, "creationTimestamp": { @@ -46343,7 +47700,7 @@ "type": "string" }, "privateKey": { - "description": "A write-only private key in PEM format. Only insert requests will include this field.", + "description": "A value read into memory from a write-only private key file. The private key file must be in PEM format. 
For security, only insert requests include this field.", "type": "string" }, "region": { @@ -46410,6 +47767,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -46424,6 +47788,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46432,6 +47797,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46463,6 +47829,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -46536,6 +47904,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46544,6 +47913,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46575,6 +47945,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -46704,6 +48076,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46712,6 +48085,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46743,6 +48117,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -46815,6 +48191,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -46823,6 +48200,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -46854,6 +48232,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -46898,7 +48278,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents a Cloud Armor Security Policy resource.\n\nOnly external backend services used by HTTP or HTTPS load balancers can reference a Security Policy. For more information, read read Cloud Armor Security Policy Concepts. (== resource_for {$api_version}.sslPolicies ==)", + "description": "Represents a Google Cloud Armor security policy resource.\n\nOnly external backend services used by HTTP or HTTPS load balancers can reference a security policy. For more information, see Google Cloud Armor security policy overview. 
(== resource_for {$api_version}.sslPolicies ==)", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -46992,6 +48372,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47000,6 +48381,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47031,6 +48413,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47273,6 +48657,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -47287,6 +48678,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47295,6 +48687,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47326,6 +48719,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47399,6 +48794,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47407,6 +48803,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47438,6 +48835,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47579,6 +48978,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47587,6 +48987,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47618,6 +49019,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47823,6 +49226,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47831,6 +49235,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47862,6 +49267,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47917,6 +49324,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -47925,6 +49333,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -47956,6 +49365,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47999,6 +49410,11 @@ "description": "An optional description of this resource. 
Provide this property when you create the resource.", "type": "string" }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a TargetHttpProxy. An up-to-date fingerprint must be provided in order to patch/update the TargetHttpProxy; otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the TargetHttpProxy.", + "format": "byte", + "type": "string" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -48014,6 +49430,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "proxyBind": { + "description": "This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.\n\nWhen this field is set to true, Envoy proxies set up inbound traffic interception and bind to the IP address and port specified in the forwarding rule. This is generally useful when using Traffic Director to configure Envoy as a gateway or middle proxy (in other words, not a sidecar proxy). The Envoy proxy listens for inbound requests and handles requests when it receives them.\n\nThe default is false.", + "type": "boolean" + }, "region": { "description": "[Output Only] URL of the region where the regional Target HTTP Proxy resides. This field is not applicable to global Target HTTP Proxies.", "type": "string" @@ -48056,6 +49476,13 @@ "selfLink": { "description": "[Output Only] Server-defined URL for this resource.", "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" } }, "type": "object" @@ -48102,6 +49529,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48110,6 +49538,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48141,6 +49570,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48196,6 +49627,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48204,6 +49636,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48235,6 +49668,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48303,6 +49738,10 @@ "description": "Represents a Target HTTPS Proxy resource.\n\nGoogle Compute Engine has two Target HTTPS Proxy resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/targetHttpsProxies) * [Regional](/compute/docs/reference/rest/{$api_version}/regionTargetHttpsProxies)\n\nA target HTTPS proxy is a component of GCP HTTPS load balancers.\n\n* targetHttpsProxies are used by external HTTPS load balancers. * regionTargetHttpsProxies are used by internal HTTPS load balancers.\n\nForwarding rules reference a target HTTPS proxy, and the target proxy then references a URL map. 
For more information, read Using Target Proxies and Forwarding rule concepts. (== resource_for {$api_version}.targetHttpsProxies ==) (== resource_for {$api_version}.regionTargetHttpsProxies ==)", "id": "TargetHttpsProxy", "properties": { + "authorizationPolicy": { + "description": "Optional. A URL referring to a networksecurity.AuthorizationPolicy resource that describes how the proxy should authorize inbound traffic. If left blank, access will not be restricted by an authorization policy.\nRefer to the AuthorizationPolicy resource for additional details.\nauthorizationPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nNote: This field currently has no impact.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -48326,6 +49765,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "proxyBind": { + "description": "This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.\n\nWhen this field is set to true, Envoy proxies set up inbound traffic interception and bind to the IP address and port specified in the forwarding rule. This is generally useful when using Traffic Director to configure Envoy as a gateway or middle proxy (in other words, not a sidecar proxy). The Envoy proxy listens for inbound requests and handles requests when it receives them.\n\nThe default is false.", + "type": "boolean" + }, "quicOverride": { "description": "Specifies the QUIC override policy for this TargetHttpsProxy resource. This setting determines whether the load balancer attempts to negotiate QUIC with clients. You can specify NONE, ENABLE, or DISABLE. \n- When quic-override is set to NONE, Google manages whether QUIC is used. \n- When quic-override is set to ENABLE, the load balancer uses QUIC when possible. \n- When quic-override is set to DISABLE, the load balancer doesn't use QUIC. \n- If the quic-override flag is not specified, NONE is implied.\n-", "enum": [ @@ -48348,6 +49791,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "serverTlsPolicy": { + "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic.\nserverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED.\nIf left blank, communications are not encrypted.\nNote: This field currently has no impact.", + "type": "string" + }, "sslCertificates": { "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. At least one SSL certificate must be specified. 
Currently, you may specify up to 15 SSL certificates.", "items": { @@ -48394,6 +49841,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -48408,6 +49862,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48416,6 +49871,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48447,6 +49903,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48520,6 +49978,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48528,6 +49987,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48559,6 +50019,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48670,6 +50132,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -48684,6 +50153,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48692,6 +50162,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48723,6 +50194,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48796,6 +50269,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48804,6 +50278,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48835,6 +50310,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48890,6 +50367,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -48898,6 +50376,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -48929,6 +50408,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -48965,7 +50446,7 @@ "id": "TargetPool", "properties": { "backupPool": { - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 
1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", + "description": "The server-defined URL for the resource. This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", "type": "string" }, "creationTimestamp": { @@ -49071,6 +50552,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -49085,6 +50573,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49093,6 +50582,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49124,6 +50614,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -49214,6 +50706,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49222,6 +50715,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49253,6 +50747,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -49360,6 +50856,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49368,6 +50865,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49399,6 +50897,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -49583,6 +51083,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49591,6 +51092,7 @@ 
"NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49622,6 +51124,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -49773,6 +51277,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49781,6 +51286,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -49812,6 +51318,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -49953,6 +51461,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -49967,6 +51482,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -49975,6 +51491,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50006,6 +51523,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -50079,6 +51598,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50087,6 +51607,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50118,6 +51639,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -50173,6 +51696,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50181,6 +51705,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50212,6 +51737,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -50247,15 +51774,19 @@ "id": "TestFailure", "properties": { "actualService": { + "description": "BackendService or BackendBucket returned by load balancer.", "type": "string" }, "expectedService": { + "description": "Expected BackendService or BackendBucket resource the given URL should be mapped to.", "type": "string" }, "host": { + "description": "Host portion of the URL.", "type": "string" }, "path": { + "description": "Path portion including query parameters in the URL.", "type": "string" } }, @@ -50297,15 +51828,15 @@ }, "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction.\n\ndefaultRouteAction has no effect when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction.\ndefaultRouteAction has no effect when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "defaultService": { - "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\n\ndefaultService has no effect when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\ndefaultService has no effect when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "type": "string" }, "defaultUrlRedirect": { "$ref": "HttpRedirectAction", - "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy." + "description": "When none of the specified hostRules match, the request is redirected to a URL specified by defaultUrlRedirect.\nIf defaultUrlRedirect is specified, defaultService or defaultRouteAction must not be set.\nNot supported when the URL map is bound to target gRPC proxy." }, "description": { "description": "An optional description of this resource. 
Provide this property when you create the resource.", @@ -50318,7 +51849,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." + "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nThe headerAction specified here take effect after headerAction specified under pathMatcher.\nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "hostRules": { "description": "The list of HostRules to use against the URL.", @@ -50358,7 +51889,7 @@ "type": "string" }, "tests": { - "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.\n\nNot supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The list of expected URL mapping tests. Request to update this UrlMap will succeed only if all of the test cases pass. You can specify a maximum of 100 tests per UrlMap.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true.", "items": { "$ref": "UrlMapTest" }, @@ -50409,6 +51940,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50417,6 +51949,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50448,6 +51981,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -50497,7 +52032,7 @@ "type": "string" }, "host": { - "description": "Host portion of the URL.", + "description": "Host portion of the URL. 
If headers contains a host header, then host must also match the header value.", "type": "string" }, "path": { @@ -50505,7 +52040,7 @@ "type": "string" }, "service": { - "description": "Expected BackendService resource the given URL should be mapped to.", + "description": "Expected BackendService or BackendBucket resource the given URL should be mapped to.\nservice cannot be set if expectedRedirectResponseCode is set.", "type": "string" } }, @@ -50566,6 +52101,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -50580,6 +52122,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50588,6 +52131,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50619,6 +52163,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -50674,6 +52220,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50682,6 +52229,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50713,6 +52261,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -50860,6 +52410,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -50868,6 +52419,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -50899,6 +52451,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -51043,6 +52597,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51051,6 +52606,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51082,6 +52638,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -51212,6 +52770,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -51226,6 +52791,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51234,6 +52800,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", 
"RESOURCE_NOT_DELETED", @@ -51265,6 +52832,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -51338,6 +52907,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51346,6 +52916,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51377,6 +52948,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -51546,6 +53119,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51554,6 +53128,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51585,6 +53160,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -51786,6 +53363,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -51800,6 +53384,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51808,6 +53393,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51839,6 +53425,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -51912,6 +53500,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -51920,6 +53509,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -51951,6 +53541,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -52006,6 +53598,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52014,6 +53607,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52045,6 +53639,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -52120,7 +53716,7 @@ }, "headerAction": { "$ref": "HttpHeaderAction", - "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap." 
+ "description": "Specifies changes to request and response headers that need to take effect for the selected backendService.\nheaderAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.\nNote that headerAction is not supported for Loadbalancers that have their loadBalancingScheme set to EXTERNAL.\nNot supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true." }, "weight": { "description": "Specifies the fraction of traffic sent to backendService, computed as weight / (sum of all weightedBackendService weights in routeAction) .\nThe selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backendService, subsequent requests will be sent to the same backendService as determined by the BackendService's session affinity policy.\nThe value must be between 0 and 1000", @@ -52171,6 +53767,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52179,6 +53776,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52210,6 +53808,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -52366,6 +53966,7 @@ "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "LARGE_DEPLOYMENT_WARNING", "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -52374,6 +53975,7 @@ "NEXT_HOP_NOT_RUNNING", "NOT_CRITICAL_ERROR", "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", @@ -52405,6 +54007,8 @@ "", "", "", + "", + "", "" ], "type": "string" diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 0a26545329c9b..7046a5bb858b4 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1178,6 +1178,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` @@ -1224,6 +1227,7 @@ type AcceleratorTypeAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -1232,6 +1236,7 @@ type AcceleratorTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1380,6 +1385,7 @@ type AcceleratorTypeListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -1388,6 +1394,7 @@ type AcceleratorTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1515,6 +1522,7 @@ type AcceleratorTypesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -1523,6 +1531,7 @@ type AcceleratorTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -1781,6 +1790,9 @@ type Address struct { // networks. // - `NAT_AUTO` for addresses that are external IP addresses // automatically reserved for Cloud NAT. + // - `IPSEC_INTERCONNECT` for addresses created from a private IP range + // that are reserved for a VLAN attachment in an IPsec encrypted + // Interconnect configuration. These addresses are regional resources. // // Possible values: // "DNS_RESOLVER" @@ -1870,6 +1882,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *AddressAggregatedListWarning `json:"warning,omitempty"` @@ -1916,6 +1931,7 @@ type AddressAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -1924,6 +1940,7 @@ type AddressAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2071,6 +2088,7 @@ type AddressListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -2079,6 +2097,7 @@ type AddressListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2204,6 +2223,7 @@ type AddressesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -2212,6 +2232,7 @@ type AddressesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -2382,17 +2403,6 @@ type AllocationSpecificSKUAllocationReservedInstanceProperties struct { // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. MachineType string `json:"machineType,omitempty"` - // MaintenanceInterval: Specifies whether this VM may be a stable fleet - // VM. Setting this to "Periodic" designates this VM as a Stable Fleet - // VM. - // - // See go/stable-fleet-ug for more details. - // - // Possible values: - // "AS_NEEDED" - // "PERIODIC" - MaintenanceInterval string `json:"maintenanceInterval,omitempty"` - // MinCpuPlatform: Minimum cpu platform the reservation. MinCpuPlatform string `json:"minCpuPlatform,omitempty"` @@ -3041,6 +3051,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` @@ -3087,6 +3100,7 @@ type AutoscalerAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -3095,6 +3109,7 @@ type AutoscalerAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3242,6 +3257,7 @@ type AutoscalerListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -3250,6 +3266,7 @@ type AutoscalerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3469,6 +3486,7 @@ type AutoscalersScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -3477,6 +3495,7 @@ type AutoscalersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -3688,20 +3707,70 @@ func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { // AutoscalingPolicyCustomMetricUtilization: Custom utilization metric // policy. type AutoscalingPolicyCustomMetricUtilization struct { + // Filter: A filter string, compatible with a Stackdriver Monitoring + // filter string for TimeSeries.list API call. This filter is used to + // select a specific TimeSeries for the purpose of autoscaling and to + // determine whether the metric is exporting per-instance or per-group + // data. + // + // For the filter to be valid for autoscaling purposes, the following + // rules apply: + // - You can only use the AND operator for joining selectors. + // - You can only use direct equality comparison operator (=) without + // any functions for each selector. + // - You can specify the metric in both the filter string and in the + // metric field. However, if specified in both places, the metric must + // be identical. + // - The monitored resource type determines what kind of values are + // expected for the metric. If it is a gce_instance, the autoscaler + // expects the metric to include a separate TimeSeries for each instance + // in a group. In such a case, you cannot filter on resource labels. + // If the resource type is any other value, the autoscaler expects this + // metric to contain values that apply to the entire autoscaled instance + // group and resource label filtering can be performed to point + // autoscaler at the correct TimeSeries to scale upon. This is called a + // per-group metric for the purpose of autoscaling. + // + // If not specified, the type defaults to gce_instance. + // + // You should provide a filter that is selective enough to pick just one + // TimeSeries for the autoscaled group or for each of the instances (if + // you are using gce_instance resource type). 
If multiple TimeSeries are + // returned upon the query execution, the autoscaler will sum their + // respective values to obtain its scaling value. + Filter string `json:"filter,omitempty"` + // Metric: The identifier (type) of the Stackdriver Monitoring metric. // The metric cannot have negative values. // // The metric must have a value type of INT64 or DOUBLE. Metric string `json:"metric,omitempty"` + // SingleInstanceAssignment: If scaling is based on a per-group metric + // value that represents the total amount of work to be done or resource + // usage, set this value to an amount assigned for a single instance of + // the scaled group. Autoscaler will keep the number of instances + // proportional to the value of this metric, the metric itself should + // not change value due to group resizing. + // + // A good metric to use with the target is for example + // pubsub.googleapis.com/subscription/num_undelivered_messages or a + // custom metric exporting the total number of requests coming to your + // instances. + // + // A bad example would be a metric exporting an average or median + // latency, since this value can't include a chunk assignable to a + // single instance, it could be better used with utilization_target + // instead. + SingleInstanceAssignment float64 `json:"singleInstanceAssignment,omitempty"` + // UtilizationTarget: The target value of the metric that autoscaler // should maintain. This must be a positive value. A utilization metric // scales number of virtual machines handling requests to increase or // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is - // compute.googleapis.com/instance/network/received_bytes_count. The - // autoscaler will work to keep this value constant for each of the + // https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the // instances. UtilizationTarget float64 `json:"utilizationTarget,omitempty"` @@ -3715,7 +3784,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { // "GAUGE" UtilizationTargetType string `json:"utilizationTargetType,omitempty"` - // ForceSendFields is a list of field names (e.g. "Metric") to + // ForceSendFields is a list of field names (e.g. "Filter") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -3723,7 +3792,7 @@ type AutoscalingPolicyCustomMetricUtilization struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Metric") to include in API + // NullFields is a list of field names (e.g. "Filter") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -3741,13 +3810,15 @@ func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) func (s *AutoscalingPolicyCustomMetricUtilization) UnmarshalJSON(data []byte) error { type NoMethod AutoscalingPolicyCustomMetricUtilization var s1 struct { - UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` + SingleInstanceAssignment gensupport.JSONFloat64 `json:"singleInstanceAssignment"` + UtilizationTarget gensupport.JSONFloat64 `json:"utilizationTarget"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } + s.SingleInstanceAssignment = float64(s1.SingleInstanceAssignment) s.UtilizationTarget = float64(s1.UtilizationTarget) return nil } @@ -4244,6 +4315,7 @@ type BackendBucketListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -4252,6 +4324,7 @@ type BackendBucketListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -4353,9 +4426,14 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // // (== resource_for {$api_version}.backendService ==) type BackendService struct { - // AffinityCookieTtlSec: If set to 0, the cookie is non-persistent and - // lasts only until the end of the browser session (or equivalent). The - // maximum allowed value is one day (86,400). + // AffinityCookieTtlSec: Lifetime of cookies in seconds. Only applicable + // if the loadBalancingScheme is EXTERNAL, INTERNAL_SELF_MANAGED, or + // INTERNAL_MANAGED, the protocol is HTTP or HTTPS, and the + // sessionAffinity is GENERATED_COOKIE, or HTTP_COOKIE. + // + // If set to 0, the cookie is non-persistent and lasts only until the + // end of the browser session (or equivalent). The maximum allowed value + // is one day (86,400). // // Not supported when the backend service is referenced by a URL map // that is bound to target gRPC proxy that has validateForProxyless @@ -4606,6 +4684,16 @@ type BackendService struct { // policy associated with this backend service. SecurityPolicy string `json:"securityPolicy,omitempty"` + // SecuritySettings: This field specifies the security policy that + // applies to this backend service. This field is applicable to either: + // + // - A regional backend service with the service_protocol set to HTTP, + // HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. + // + // - A global backend service with the load_balancing_scheme set to + // INTERNAL_SELF_MANAGED. + SecuritySettings *SecuritySettings `json:"securitySettings,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -4696,6 +4784,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` @@ -4742,6 +4833,7 @@ type BackendServiceAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -4750,6 +4842,7 @@ type BackendServiceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5099,6 +5192,7 @@ type BackendServiceListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -5107,6 +5201,7 @@ type BackendServiceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5311,6 +5406,7 @@ type BackendServicesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -5319,6 +5415,7 @@ type BackendServicesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5399,6 +5496,10 @@ func (s *BackendServicesScopedListWarningData) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { + // BindingId: A client-specified ID for this binding. Expected to be + // globally unique to support the internal bindings-by-ID API. + BindingId string `json:"bindingId,omitempty"` + // Condition: The condition that is associated with this binding. // // If the condition evaluates to `true`, then this binding applies to @@ -5467,7 +5568,7 @@ type Binding struct { // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "Condition") to + // ForceSendFields is a list of field names (e.g. "BindingId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -5475,7 +5576,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Condition") to include in + // NullFields is a list of field names (e.g. "BindingId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -5768,6 +5869,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` @@ -5814,6 +5918,7 @@ type CommitmentAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -5822,6 +5927,7 @@ type CommitmentAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -5969,6 +6075,7 @@ type CommitmentListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -5977,6 +6084,7 @@ type CommitmentListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6103,6 +6211,7 @@ type CommitmentsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -6111,6 +6220,7 @@ type CommitmentsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -6255,6 +6365,37 @@ func (s *Condition) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ConfidentialInstanceConfig: A set of Confidential Instance options. +type ConfidentialInstanceConfig struct { + // EnableConfidentialCompute: Defines whether the instance should have + // confidential compute enabled. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableConfidentialCompute") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "EnableConfidentialCompute") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. However, any field with an empty value appearing in + // NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ConfidentialInstanceConfig) MarshalJSON() ([]byte, error) { + type NoMethod ConfidentialInstanceConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ConnectionDraining: Message containing connection draining // configuration. type ConnectionDraining struct { @@ -6871,6 +7012,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *DiskAggregatedListWarning `json:"warning,omitempty"` @@ -6917,6 +7061,7 @@ type DiskAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -6925,6 +7070,7 @@ type DiskAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7137,6 +7283,7 @@ type DiskListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -7145,6 +7292,7 @@ type DiskListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7381,6 +7529,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` @@ -7427,6 +7578,7 @@ type DiskTypeAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -7435,6 +7587,7 @@ type DiskTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7582,6 +7735,7 @@ type DiskTypeListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -7590,6 +7744,7 @@ type DiskTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7716,6 +7871,7 @@ type DiskTypesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -7724,6 +7880,7 @@ type DiskTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -7934,6 +8091,7 @@ type DisksScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // 
"NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -7942,6 +8100,7 @@ type DisksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8262,6 +8421,7 @@ type ExchangedPeeringRoutesListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -8270,6 +8430,7 @@ type ExchangedPeeringRoutesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -8633,6 +8794,7 @@ type ExternalVpnGatewayListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -8641,6 +8803,7 @@ type ExternalVpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9074,6 +9237,7 @@ type FirewallListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -9082,6 +9246,7 @@ type FirewallListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9285,6 +9450,9 @@ type ForwardingRule struct { // // Must be set to `0.0.0.0` when the target is targetGrpcProxy that has // validateForProxyless field set to true. + // + // For Private Service Connect forwarding rules that forward traffic to + // Google APIs, IP address must be provided. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. For protocol @@ -9435,10 +9603,12 @@ type ForwardingRule struct { // Network: This field is not used for external load balancing. // - // For INTERNAL and INTERNAL_SELF_MANAGED load balancing, this field - // identifies the network that the load balanced IP should belong to for - // this Forwarding Rule. If this field is not specified, the default - // network will be used. + // For internal load balancing, this field identifies the network that + // the load balanced IP should belong to for this Forwarding Rule. If + // this field is not specified, the default network will be used. + // + // For Private Service Connect forwarding rules that forward traffic to + // Google APIs, a network must be provided. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -9528,7 +9698,7 @@ type ForwardingRule struct { // This field is only used for internal load balancing. ServiceName string `json:"serviceName,omitempty"` - // Subnetwork: This field is only used for INTERNAL load balancing. + // Subnetwork: This field is only used for internal load balancing. 
// // For internal load balancing, this field identifies the subnetwork // that the load balanced IP should belong to for this Forwarding @@ -9540,12 +9710,25 @@ type ForwardingRule struct { Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched - // traffic. For regional forwarding rules, this target must live in the + // traffic. For regional forwarding rules, this target must be in the // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded - // traffic must be of a type appropriate to the target object. For - // INTERNAL_SELF_MANAGED load balancing, only targetHttpProxy and - // targetGrpcProxy are valid, not targetHttpsProxy. + // traffic must be of a type appropriate to the target object. For more + // information, see the "Target" column in [Port + // specifications](/load-balancing/docs/forwarding-rule-concepts#ip_addre + // ss_specifications). + // + // For Private Service Connect forwarding rules that forward traffic to + // Google APIs, provide the name of a supported Google API bundle. + // Currently, the supported Google API bundles include: + // + // + // - vpc-sc - GCP APIs that support VPC Service Controls. For more + // information about which APIs support VPC Service Controls, refer to + // VPC-SC supported products and limitations. + // - all-apis - All GCP APIs. For more information about which APIs are + // supported with this bundle, refer to Private Google Access-specific + // domains and VIPs. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9598,6 +9781,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` @@ -9644,6 +9830,7 @@ type ForwardingRuleAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -9652,6 +9839,7 @@ type ForwardingRuleAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9799,6 +9987,7 @@ type ForwardingRuleListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -9807,6 +9996,7 @@ type ForwardingRuleListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -9960,6 +10150,7 @@ type ForwardingRulesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -9968,6 +10159,7 @@ type ForwardingRulesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -10637,14 +10829,24 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // [Regional](/compute/docs/reference/rest/{$api_version}/regionHealthChe // cks) // -// Internal HTTP(S) load balancers must use regional health checks. +// Internal HTTP(S) load balancers must use regional health checks +// (`compute.v1.regionHealthChecks`). +// +// Traffic Director must use global health checks +// (`compute.v1.HealthChecks`). +// // Internal TCP/UDP load balancers can use either regional or global -// health checks. All other types of GCP load balancers and managed -// instance group auto-healing must use global health checks. For more -// information, read Health Check Concepts. +// health checks (`compute.v1.regionHealthChecks` or +// `compute.v1.HealthChecks`). +// +// External HTTP(S), TCP proxy, and SSL proxy load balancers as well as +// managed instance group auto-healing must use global health checks +// (`compute.v1.HealthChecks`). // -// To perform health checks on network load balancers, you must use -// either httpHealthChecks or httpsHealthChecks. +// Network load balancers must use legacy HTTP health checks +// (httpHealthChecks). +// +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -10677,6 +10879,9 @@ type HealthCheck struct { // Kind: Type of the resource. Kind string `json:"kind,omitempty"` + // LogConfig: Configure logging on this health check. + LogConfig *HealthCheckLogConfig `json:"logConfig,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. 
Specifically, the name must be 1-63 characters long and @@ -10818,6 +11023,7 @@ type HealthCheckListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -10826,6 +11032,7 @@ type HealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -10904,6 +11111,36 @@ func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HealthCheckLogConfig: Configuration of logging on a health check. If +// logging is enabled, logs will be exported to Stackdriver. +type HealthCheckLogConfig struct { + // Enable: Indicates whether or not to export logs. This is false by + // default, which means no health check logging will be done. + Enable bool `json:"enable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod HealthCheckLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HealthCheckReference: A full or valid partial URL to a health check. // For example, the following are valid URLs: // - @@ -11155,6 +11392,7 @@ type HealthCheckServicesListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -11163,6 +11401,7 @@ type HealthCheckServicesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11263,6 +11502,9 @@ type HealthChecksAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *HealthChecksAggregatedListWarning `json:"warning,omitempty"` @@ -11309,6 +11551,7 @@ type HealthChecksAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -11317,6 +11560,7 @@ type HealthChecksAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11442,6 +11686,7 @@ type HealthChecksScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -11450,6 +11695,7 @@ type HealthChecksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -11635,7 +11881,6 @@ type HostRule struct { // matches any string of ([a-z0-9-.]*). In that case, * must be the // first character and must be followed in the pattern by either - or // .. - // // * based matching is not supported when the URL map is bound to target // gRPC proxy that has validateForProxyless field set to true. Hosts []string `json:"hosts,omitempty"` @@ -11864,6 +12109,15 @@ type HttpHeaderMatch struct { // For matching against the HTTP request's authority, use a headerMatch // with the header name ":authority". // For matching a request's method, use the headerName ":method". + // When the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true, only non-binary + // user-specified custom metadata and the `content-type` header are + // supported. The following transport-level headers cannot be used in + // header matching rules: `:authority`, `:method`, `:path`, `:scheme`, + // `user-agent`, `accept-encoding`, `content-encoding`, + // `grpc-accept-encoding`, `grpc-encoding`, + // `grpc-previous-rpc-attempts`, `grpc-tags-bin`, `grpc-timeout` and + // `grpc-trace-bin. HeaderName string `json:"headerName,omitempty"` // InvertMatch: If set to false, the headerMatch is considered a match @@ -12140,6 +12394,7 @@ type HttpHealthCheckListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -12148,6 +12403,7 @@ type HttpHealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -12423,6 +12679,7 @@ type HttpRouteAction struct { // CorsPolicy: The specification for allowing client side cross-origin // requests. Please see W3C Recommendation for Cross Origin Resource // Sharing + // Not supported when the URL map is bound to target gRPC proxy. CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"` // FaultInjectionPolicy: The specification for fault injection @@ -12434,6 +12691,8 @@ type HttpRouteAction struct { // aborted by the Loadbalancer for a percentage of requests. 
// timeout and retry_policy will be ignored by clients that are // configured with a fault_injection_policy. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` // RequestMirrorPolicy: Specifies the policy on how requests intended @@ -12441,9 +12700,14 @@ type HttpRouteAction struct { // service. Loadbalancer does not wait for responses from the shadow // service. Prior to sending traffic to the shadow service, the host / // authority header is suffixed with -shadow. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. RequestMirrorPolicy *RequestMirrorPolicy `json:"requestMirrorPolicy,omitempty"` - // RetryPolicy: Specifies the retry policy associated with this route. + // RetryPolicy: Specifies the retry policy associated with this + // route. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. RetryPolicy *HttpRetryPolicy `json:"retryPolicy,omitempty"` // Timeout: Specifies the timeout for the selected route. Timeout is @@ -12452,22 +12716,26 @@ type HttpRouteAction struct { // Timeout includes all retries. // If not specified, will use the largest timeout among all backend // services associated with the route. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. Timeout *Duration `json:"timeout,omitempty"` // UrlRewrite: The spec to modify the URL of the request, prior to // forwarding the request to the matched service. // urlRewrite is the only action supported in UrlMaps for external // HTTP(S) load balancers. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` // WeightedBackendServices: A list of weighted backend services to send // traffic to when a route match occurs. The weights determine the // fraction of traffic that flows to their corresponding backend // service. If all traffic needs to go to a single backend service, - // there must be one weightedBackendService with weight set to a non 0 - // number. + // there must be one weightedBackendService with weight set to a + // non-zero number. // Once a backendService is identified and before forwarding the request - // to the backend service, advanced routing actions like Url rewrites + // to the backend service, advanced routing actions such as URL rewrites // and header transformations are applied depending on additional // settings specified in this HttpRouteAction. WeightedBackendServices []*WeightedBackendService `json:"weightedBackendServices,omitempty"` @@ -12512,6 +12780,8 @@ type HttpRouteRule struct { // ServiceWeightAction[].headerAction // Note that headerAction is not supported for Loadbalancers that have // their loadBalancingScheme set to EXTERNAL. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // MatchRules: The list of criteria for matching attributes of a request @@ -12563,7 +12833,9 @@ type HttpRouteRule struct { // UrlRedirect: When this rule is matched, the request is redirected to // a URL specified by urlRedirect. - // If urlRedirect is specified, service or routeAction must not be set. 
+ // If urlRedirect is specified, service or routeAction must not be + // set. + // Not supported when the URL map is bound to target gRPC proxy. UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -12610,6 +12882,7 @@ type HttpRouteRuleMatch struct { // case sensitive. // The default value is false. // ignoreCase must not be used with regexMatch. + // Not supported when the URL map is bound to target gRPC proxy. IgnoreCase bool `json:"ignoreCase,omitempty"` // MetadataFilters: Opaque filter criteria used by Loadbalancer to @@ -12629,6 +12902,8 @@ type HttpRouteRuleMatch struct { // belongs to. // metadataFilters only applies to Loadbalancers that have their // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` // PrefixMatch: For satisfying the matchRule condition, the request's @@ -12642,6 +12917,7 @@ type HttpRouteRuleMatch struct { // QueryParameterMatches: Specifies a list of query parameter match // criteria, all of which must match corresponding query parameters in // the request. + // Not supported when the URL map is bound to target gRPC proxy. QueryParameterMatches []*HttpQueryParameterMatch `json:"queryParameterMatches,omitempty"` // RegexMatch: For satisfying the matchRule condition, the path of the @@ -12839,6 +13115,7 @@ type HttpsHealthCheckListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -12847,6 +13124,7 @@ type HttpsHealthCheckListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -13253,6 +13531,7 @@ type ImageListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -13261,6 +13540,7 @@ type ImageListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -13389,6 +13669,8 @@ type Instance struct { // Enabling IP Forwarding. CanIpForward bool `json:"canIpForward,omitempty"` + ConfidentialInstanceConfig *ConfidentialInstanceConfig `json:"confidentialInstanceConfig,omitempty"` + // CpuPlatform: [Output Only] The CPU platform used by this instance. CpuPlatform string `json:"cpuPlatform,omitempty"` @@ -13454,6 +13736,18 @@ type Instance struct { // by the setLabels method. Labels map[string]string `json:"labels,omitempty"` + // LastStartTimestamp: [Output Only] Last start timestamp in RFC3339 + // text format. + LastStartTimestamp string `json:"lastStartTimestamp,omitempty"` + + // LastStopTimestamp: [Output Only] Last stop timestamp in RFC3339 text + // format. + LastStopTimestamp string `json:"lastStopTimestamp,omitempty"` + + // LastSuspendedTimestamp: [Output Only] Last suspended timestamp in + // RFC3339 text format. 
+ LastSuspendedTimestamp string `json:"lastSuspendedTimestamp,omitempty"` + // MachineType: Full or partial URL of the machine type resource to use // for this instance, in the format: // zones/zone/machineTypes/machine-type. This is provided by the client @@ -13545,7 +13839,9 @@ type Instance struct { // Status: [Output Only] The status of the instance. One of the // following values: PROVISIONING, STAGING, RUNNING, STOPPING, - // SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. + // SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more + // information about the status of the instance, see Instance life + // cycle. // // Possible values: // "DEPROVISIONING" @@ -13627,6 +13923,9 @@ type InstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *InstanceAggregatedListWarning `json:"warning,omitempty"` @@ -13673,6 +13972,7 @@ type InstanceAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -13681,6 +13981,7 @@ type InstanceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -13895,6 +14196,9 @@ type InstanceGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *InstanceGroupAggregatedListWarning `json:"warning,omitempty"` @@ -13941,6 +14245,7 @@ type InstanceGroupAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -13949,6 +14254,7 @@ type InstanceGroupAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -14097,6 +14403,7 @@ type InstanceGroupListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -14105,6 +14412,7 @@ type InstanceGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -14434,6 +14742,9 @@ type InstanceGroupManagerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *InstanceGroupManagerAggregatedListWarning `json:"warning,omitempty"` @@ -14480,6 +14791,7 @@ type InstanceGroupManagerAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -14488,6 +14800,7 @@ type InstanceGroupManagerAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -14674,6 +14987,7 @@ type InstanceGroupManagerListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -14682,6 +14996,7 @@ type InstanceGroupManagerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -15083,6 +15398,11 @@ func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, er // InstanceGroupManagersApplyUpdatesRequest: // InstanceGroupManagers.applyUpdatesToInstances type InstanceGroupManagersApplyUpdatesRequest struct { + // AllInstances: Flag to update all instances instead of specified list + // of ?instances?. If the flag is set to true then the instances may not + // be specified in the request. + AllInstances bool `json:"allInstances,omitempty"` + // Instances: The list of URLs of one or more instances for which you // want to apply updates. Each URL can be a full URL or a partial URL, // such as zones/[ZONE]/instances/[INSTANCE_NAME]. @@ -15122,7 +15442,7 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // "RESTART" MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "AllInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15130,10 +15450,10 @@ type InstanceGroupManagersApplyUpdatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AllInstances") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` @@ -15372,6 +15692,7 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -15380,6 +15701,7 @@ type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -15569,6 +15891,7 @@ type InstanceGroupManagersScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -15577,6 +15900,7 @@ type InstanceGroupManagersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -15852,6 +16176,7 @@ type InstanceGroupsListInstancesWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -15860,6 +16185,7 @@ type InstanceGroupsListInstancesWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16049,6 +16375,7 @@ type InstanceGroupsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -16057,6 +16384,7 @@ type InstanceGroupsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16241,6 +16569,7 @@ type InstanceListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -16249,6 +16578,7 @@ type InstanceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16397,6 +16727,7 @@ type InstanceListReferrersWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -16405,6 +16736,7 @@ type InstanceListReferrersWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -16651,6 +16983,10 @@ type InstanceProperties struct { // information. 
CanIpForward bool `json:"canIpForward,omitempty"` + // ConfidentialInstanceConfig: Specifies the Confidential Instance + // options. + ConfidentialInstanceConfig *ConfidentialInstanceConfig `json:"confidentialInstanceConfig,omitempty"` + // Description: An optional text description for the instances that are // created from these properties. Description string `json:"description,omitempty"` @@ -16922,6 +17258,7 @@ type InstanceTemplateListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -16930,6 +17267,7 @@ type InstanceTemplateListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -17157,6 +17495,7 @@ type InstancesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -17165,6 +17504,7 @@ type InstancesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -17905,6 +18245,9 @@ type InterconnectAttachmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
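The ConfidentialInstanceConfig hook added to InstanceProperties above would typically be set when inserting an instance template. A hedged sketch follows; it is not part of this patch, the EnableConfidentialCompute field name is an assumption about the generated ConfidentialInstanceConfig type (that type's body is not shown in this hunk), and the project, image and machine type are placeholders. Confidential VM requires an N2D machine type.

package main

import compute "google.golang.org/api/compute/v1"

func insertConfidentialTemplate(svc *compute.Service) error {
	tmpl := &compute.InstanceTemplate{
		Name: "confidential-template",
		Properties: &compute.InstanceProperties{
			MachineType: "n2d-standard-2",
			ConfidentialInstanceConfig: &compute.ConfidentialInstanceConfig{
				EnableConfidentialCompute: true, // assumed field name
			},
			Disks: []*compute.AttachedDisk{{
				Boot:       true,
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-10",
				},
			}},
			NetworkInterfaces: []*compute.NetworkInterface{{
				Network: "global/networks/default",
			}},
		},
	}
	_, err := svc.InstanceTemplates.Insert("my-project", tmpl).Do()
	return err
}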
Warning *InterconnectAttachmentAggregatedListWarning `json:"warning,omitempty"` @@ -17951,6 +18294,7 @@ type InterconnectAttachmentAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -17959,6 +18303,7 @@ type InterconnectAttachmentAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -18109,6 +18454,7 @@ type InterconnectAttachmentListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -18117,6 +18463,7 @@ type InterconnectAttachmentListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -18322,6 +18669,7 @@ type InterconnectAttachmentsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -18330,6 +18678,7 @@ type InterconnectAttachmentsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -18746,6 +19095,7 @@ type InterconnectListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -18754,6 +19104,7 @@ type InterconnectListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -19023,6 +19374,7 @@ type InterconnectLocationListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -19031,6 +19383,7 @@ type InterconnectLocationListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -19586,6 +19939,7 @@ type LicensesListResponseWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -19594,6 +19948,7 @@ type LicensesListResponseWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -20046,6 +20401,9 @@ type MachineTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *MachineTypeAggregatedListWarning `json:"warning,omitempty"` @@ -20092,6 +20450,7 @@ type MachineTypeAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -20100,6 +20459,7 @@ type MachineTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -20247,6 +20607,7 @@ type MachineTypeListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -20255,6 +20616,7 @@ type MachineTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -20381,6 +20743,7 @@ type MachineTypesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -20389,6 +20752,7 @@ type MachineTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -20996,6 +21360,10 @@ type Network struct { // networks. Kind string `json:"kind,omitempty"` + // Mtu: Maximum Transmission Unit in bytes. The minimum value for this + // field is 1460 and the maximum value is 1500 bytes. + Mtu int64 `json:"mtu,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -21243,6 +21611,9 @@ type NetworkEndpointGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
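The new Network.Mtu field surfaced above is a plain int64 set on insert. Below is a minimal sketch, not part of this patch, of creating a custom-mode VPC with an explicit MTU; the project and network names are placeholders, and an authenticated *compute.Service is assumed. It also illustrates the ForceSendFields convention documented throughout this file for false-valued booleans.

package main

import compute "google.golang.org/api/compute/v1"

func insertNetworkWithMTU(svc *compute.Service) error {
	n := &compute.Network{
		Name:                  "my-custom-net",
		AutoCreateSubnetworks: false,
		Mtu:                   1500, // valid range per the field comment: 1460-1500
		// A false bool is dropped by omitempty, so custom subnet mode must
		// be forced onto the wire explicitly.
		ForceSendFields: []string{"AutoCreateSubnetworks"},
	}
	_, err := svc.Networks.Insert("my-project", n).Do()
	return err
}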
Warning *NetworkEndpointGroupAggregatedListWarning `json:"warning,omitempty"` @@ -21289,6 +21660,7 @@ type NetworkEndpointGroupAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -21297,6 +21669,7 @@ type NetworkEndpointGroupAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -21386,8 +21759,8 @@ func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, e type NetworkEndpointGroupAppEngine struct { // Service: Optional serving service. // - // The service name must be 1-63 characters long, and comply with - // RFC1035. + // The service name is case-sensitive and must be 1-63 characters + // long. // // Example value: "default", "my-service". Service string `json:"service,omitempty"` @@ -21406,8 +21779,8 @@ type NetworkEndpointGroupAppEngine struct { // Version: Optional serving version. // - // The version must be 1-63 characters long, and comply with - // RFC1035. + // The version name is case-sensitive and must be 1-100 characters + // long. // // Example value: "v1", "v2". Version string `json:"version,omitempty"` @@ -21612,6 +21985,7 @@ type NetworkEndpointGroupListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -21620,6 +21994,7 @@ type NetworkEndpointGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -21855,6 +22230,7 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -21863,6 +22239,7 @@ type NetworkEndpointGroupsListNetworkEndpointsWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -21992,6 +22369,7 @@ type NetworkEndpointGroupsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -22000,6 +22378,7 @@ type NetworkEndpointGroupsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22265,6 +22644,7 @@ type NetworkListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -22273,6 +22653,7 @@ type NetworkListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // 
"PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22406,6 +22787,9 @@ type NetworkPeering struct { // network is in the same project as the current network. Network string `json:"network,omitempty"` + // PeerMtu: Maximum Transmission Unit in bytes. + PeerMtu int64 `json:"peerMtu,omitempty"` + // State: [Output Only] State for the peering, either `ACTIVE` or // `INACTIVE`. The peering is `ACTIVE` when there's a matching // configuration in the peer network. @@ -22709,6 +23093,9 @@ type NodeGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *NodeGroupAggregatedListWarning `json:"warning,omitempty"` @@ -22755,6 +23142,7 @@ type NodeGroupAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -22763,6 +23151,7 @@ type NodeGroupAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -22843,6 +23232,7 @@ func (s *NodeGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { type NodeGroupAutoscalingPolicy struct { // MaxNodes: The maximum number of nodes that the group should have. + // Must be set if autoscaling is enabled. Maximum value allowed is 100. MaxNodes int64 `json:"maxNodes,omitempty"` // MinNodes: The minimum number of nodes that the group should have. 
@@ -22950,6 +23340,7 @@ type NodeGroupListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -22958,6 +23349,7 @@ type NodeGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23218,6 +23610,7 @@ type NodeGroupsListNodesWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -23226,6 +23619,7 @@ type NodeGroupsListNodesWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23352,6 +23746,7 @@ type NodeGroupsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -23360,6 +23755,7 @@ type NodeGroupsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23605,6 +24001,9 @@ type NodeTemplateAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
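The tightened NodeGroupAutoscalingPolicy.MaxNodes comment in the hunk above (the field must be set when autoscaling is enabled and may be at most 100) would be exercised roughly as follows. This is an illustrative sketch only: the Mode value and the NodeGroup.AutoscalingPolicy and NodeTemplate field names are assumptions about the generated types rather than content of this hunk, and the project, zone and template names are placeholders.

package main

import compute "google.golang.org/api/compute/v1"

func insertAutoscaledNodeGroup(svc *compute.Service) error {
	ng := &compute.NodeGroup{
		Name:         "render-nodes",
		NodeTemplate: "regions/us-central1/nodeTemplates/my-template",
		AutoscalingPolicy: &compute.NodeGroupAutoscalingPolicy{
			Mode:     "ON", // assumed enum value
			MinNodes: 1,
			MaxNodes: 10, // required when autoscaling is on; at most 100
		},
	}
	// The third argument is the initial node count for the group.
	_, err := svc.NodeGroups.Insert("my-project", "us-central1-a", 1, ng).Do()
	return err
}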
Warning *NodeTemplateAggregatedListWarning `json:"warning,omitempty"` @@ -23651,6 +24050,7 @@ type NodeTemplateAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -23659,6 +24059,7 @@ type NodeTemplateAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23806,6 +24207,7 @@ type NodeTemplateListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -23814,6 +24216,7 @@ type NodeTemplateListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -23970,6 +24373,7 @@ type NodeTemplatesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -23978,6 +24382,7 @@ type NodeTemplatesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24160,6 +24565,9 @@ type NodeTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *NodeTypeAggregatedListWarning `json:"warning,omitempty"` @@ -24206,6 +24614,7 @@ type NodeTypeAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -24214,6 +24623,7 @@ type NodeTypeAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24361,6 +24771,7 @@ type NodeTypeListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -24369,6 +24780,7 @@ type NodeTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24495,6 +24907,7 @@ type NodeTypesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -24503,6 +24916,7 @@ type NodeTypesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -24777,6 +25191,7 @@ type NotificationEndpointListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -24785,6 +25200,7 @@ type NotificationEndpointListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25094,6 +25510,7 @@ type OperationWarnings struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -25102,6 +25519,7 @@ type OperationWarnings struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25203,6 +25621,9 @@ type OperationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *OperationAggregatedListWarning `json:"warning,omitempty"` @@ -25249,6 +25670,7 @@ type OperationAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -25257,6 +25679,7 @@ type OperationAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25404,6 +25827,7 @@ type OperationListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -25412,6 +25836,7 @@ type OperationListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25538,6 +25963,7 @@ type OperationsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -25546,6 +25972,7 @@ type OperationsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -25851,6 +26278,9 @@ type PacketMirroringAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
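The Unreachables field being added to the aggregated-list responses above, together with the new PARTIAL_SUCCESS warning code, lets callers see which scopes could not be queried instead of the whole call failing. A small sketch of consuming it, not taken from this PR; the project name is a placeholder and an authenticated *compute.Service is assumed.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func listOperations(ctx context.Context, svc *compute.Service) error {
	return svc.GlobalOperations.AggregatedList("my-project").Pages(ctx,
		func(page *compute.OperationAggregatedList) error {
			// Scopes (zones/regions) the backend could not reach.
			for _, u := range page.Unreachables {
				log.Printf("unreachable scope: %s", u)
			}
			for scope, scoped := range page.Items {
				log.Printf("%s: %d operations", scope, len(scoped.Operations))
			}
			return nil
		})
}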
Warning *PacketMirroringAggregatedListWarning `json:"warning,omitempty"` @@ -25897,6 +26327,7 @@ type PacketMirroringAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -25905,6 +26336,7 @@ type PacketMirroringAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -26122,6 +26554,7 @@ type PacketMirroringListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -26130,6 +26563,7 @@ type PacketMirroringListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -26397,6 +26831,7 @@ type PacketMirroringsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -26405,6 +26840,7 @@ type PacketMirroringsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -26498,10 +26934,6 @@ type PathMatcher struct { // set. // UrlMaps for external HTTP(S) load balancers support only the // urlRewrite action within a pathMatcher's defaultRouteAction. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy that has validateForProxyless - // field set to true. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL to the BackendService @@ -26525,10 +26957,6 @@ type PathMatcher struct { // permissions on the specified resource default_service: // - compute.backendBuckets.use // - compute.backendServices.use - // - // pathMatchers[].defaultService is the only option available when the - // URL map is bound to target gRPC proxy that has validateForProxyless - // field set to true. DefaultService string `json:"defaultService,omitempty"` // DefaultUrlRedirect: When none of the specified pathRules or @@ -26536,9 +26964,7 @@ type PathMatcher struct { // defaultUrlRedirect. // If defaultUrlRedirect is specified, defaultService or // defaultRouteAction must not be set. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy. + // Not supported when the URL map is bound to target gRPC proxy. DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` // Description: An optional description of this resource. Provide this @@ -26552,10 +26978,8 @@ type PathMatcher struct { // // Note that headerAction is not supported for Loadbalancers that have // their loadBalancingScheme set to EXTERNAL. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy that has validateForProxyless - // field set to true. 
+ // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // Name: The name to which this PathMatcher is referred by the HostRule. @@ -26570,10 +26994,6 @@ type PathMatcher struct { // list. // Within a given pathMatcher, only one of pathRules or routeRules must // be set. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy that has validateForProxyless - // field set to true. PathRules []*PathRule `json:"pathRules,omitempty"` // RouteRules: The list of HTTP route rules. Use this list instead of @@ -26582,10 +27002,6 @@ type PathMatcher struct { // lowest to highest number. // Within a given pathMatcher, you can set only one of pathRules or // routeRules. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy that has validateForProxyless - // field set to true. RouteRules []*HttpRouteRule `json:"routeRules,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultRouteAction") @@ -26645,7 +27061,9 @@ type PathRule struct { // UrlRedirect: When a path pattern is matched, the request is // redirected to a URL specified by urlRedirect. - // If urlRedirect is specified, service or routeAction must not be set. + // If urlRedirect is specified, service or routeAction must not be + // set. + // Not supported when the URL map is bound to target gRPC proxy. UrlRedirect *HttpRedirectAction `json:"urlRedirect,omitempty"` // ForceSendFields is a list of field names (e.g. "Paths") to @@ -27574,6 +27992,7 @@ type RegionAutoscalerListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -27582,6 +28001,7 @@ type RegionAutoscalerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -27729,6 +28149,7 @@ type RegionDiskTypeListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -27737,6 +28158,7 @@ type RegionDiskTypeListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -27968,6 +28390,7 @@ type RegionInstanceGroupListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -27976,6 +28399,7 @@ type RegionInstanceGroupListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -28156,6 +28580,7 @@ type RegionInstanceGroupManagerListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // 
"NEXT_HOP_CANNOT_IP_FORWARD" @@ -28164,6 +28589,7 @@ type RegionInstanceGroupManagerListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -28336,6 +28762,11 @@ func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]by // RegionInstanceGroupManagersApplyUpdatesRequest: // RegionInstanceGroupManagers.applyUpdatesToInstances type RegionInstanceGroupManagersApplyUpdatesRequest struct { + // AllInstances: Flag to update all instances instead of specified list + // of ?instances?. If the flag is set to true then the instances may not + // be specified in the request. + AllInstances bool `json:"allInstances,omitempty"` + // Instances: The list of URLs of one or more instances for which you // want to apply updates. Each URL can be a full URL or a partial URL, // such as zones/[ZONE]/instances/[INSTANCE_NAME]. @@ -28375,7 +28806,7 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { // "RESTART" MostDisruptiveAllowedAction string `json:"mostDisruptiveAllowedAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "AllInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -28383,10 +28814,10 @@ type RegionInstanceGroupManagersApplyUpdatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AllInstances") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` @@ -28554,6 +28985,7 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -28562,6 +28994,7 @@ type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -28840,6 +29273,7 @@ type RegionInstanceGroupsListInstancesWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -28848,6 +29282,7 @@ type RegionInstanceGroupsListInstancesWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29068,6 +29503,7 @@ type RegionListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -29076,6 +29512,7 @@ type RegionListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29481,6 +29918,9 @@ type ReservationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *ReservationAggregatedListWarning `json:"warning,omitempty"` @@ -29527,6 +29967,7 @@ type ReservationAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -29535,6 +29976,7 @@ type ReservationAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29681,6 +30123,7 @@ type ReservationListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -29689,6 +30132,7 @@ type ReservationListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -29843,6 +30287,7 @@ type ReservationsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -29851,6 +30296,7 @@ type ReservationsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30053,6 +30499,7 @@ type ResourcePoliciesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -30061,6 +30508,7 @@ type ResourcePoliciesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30152,7 +30600,7 @@ type ResourcePolicy struct { Description string `json:"description,omitempty"` - // GroupPlacementPolicy: Resource policy for instacnes for placement + // GroupPlacementPolicy: Resource policy for instances for placement // configuration. GroupPlacementPolicy *ResourcePolicyGroupPlacementPolicy `json:"groupPlacementPolicy,omitempty"` @@ -30246,6 +30694,9 @@ type ResourcePolicyAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *ResourcePolicyAggregatedListWarning `json:"warning,omitempty"` @@ -30292,6 +30743,7 @@ type ResourcePolicyAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -30300,6 +30752,7 @@ type ResourcePolicyAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30571,6 +31024,7 @@ type ResourcePolicyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -30579,6 +31033,7 @@ type ResourcePolicyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -30774,6 +31229,9 @@ func (s *ResourcePolicySnapshotSchedulePolicySchedule) MarshalJSON() ([]byte, er // ResourcePolicySnapshotSchedulePolicySnapshotProperties: Specified // snapshot properties for scheduled snapshots created by this policy. type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { + // ChainName: Chain name that the snapshot is created in. + ChainName string `json:"chainName,omitempty"` + // GuestFlush: Indication to perform a 'guest aware' snapshot. GuestFlush bool `json:"guestFlush,omitempty"` @@ -30785,7 +31243,7 @@ type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { // snapshot (regional or multi-regional). StorageLocations []string `json:"storageLocations,omitempty"` - // ForceSendFields is a list of field names (e.g. "GuestFlush") to + // ForceSendFields is a list of field names (e.g. "ChainName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -30793,7 +31251,7 @@ type ResourcePolicySnapshotSchedulePolicySnapshotProperties struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "GuestFlush") to include in + // NullFields is a list of field names (e.g. "ChainName") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -30931,9 +31389,10 @@ type Route struct { NextHopGateway string `json:"nextHopGateway,omitempty"` // NextHopIlb: The URL to a forwarding rule of type - // loadBalancingScheme=INTERNAL that should handle matching packets. You - // can only specify the forwarding rule as a partial or full URL. For - // example, the following are all valid URLs: + // loadBalancingScheme=INTERNAL that should handle matching packets or + // the IP address of the forwarding Rule. 
For example, the following are + // all valid URLs: + // - 10.128.0.56 // - // https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule // - regions/region/forwardingRules/forwardingRule @@ -31022,6 +31481,7 @@ type RouteWarnings struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -31030,6 +31490,7 @@ type RouteWarnings struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -31176,6 +31637,7 @@ type RouteListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -31184,6 +31646,7 @@ type RouteListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -31403,6 +31866,9 @@ type RouterAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *RouterAggregatedListWarning `json:"warning,omitempty"` @@ -31449,6 +31915,7 @@ type RouterAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -31457,6 +31924,7 @@ type RouterAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -31826,6 +32294,7 @@ type RouterListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -31834,6 +32303,7 @@ type RouterListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -32377,6 +32847,7 @@ type RoutersScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -32385,6 +32856,7 @@ type RoutersScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -32597,7 +33069,7 @@ func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. 
NextID: 12 +// Scheduling: Sets the scheduling options for an Instance. NextID: 13 type Scheduling struct { // AutomaticRestart: Specifies whether the instance should be // automatically restarted if it is terminated by Compute Engine (not @@ -32794,12 +33266,13 @@ func (s *SecurityPoliciesWafConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicy: Represents a Cloud Armor Security Policy +// SecurityPolicy: Represents a Google Cloud Armor security policy // resource. // // Only external backend services that use load balancers can reference -// a Security Policy. For more information, read Cloud Armor Security -// Policy Concepts. (== resource_for {$api_version}.securityPolicies ==) +// a security policy. For more information, see Google Cloud Armor +// security policy overview. (== resource_for +// {$api_version}.securityPolicies ==) type SecurityPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -32941,6 +33414,7 @@ type SecurityPolicyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -32949,6 +33423,7 @@ type SecurityPolicyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -33184,6 +33659,59 @@ func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SecuritySettings: The authentication and authorization settings for a +// BackendService. +type SecuritySettings struct { + // ClientTlsPolicy: Optional. A URL referring to a + // networksecurity.ClientTlsPolicy resource that describes how clients + // should authenticate with this service's backends. + // clientTlsPolicy only applies to a global BackendService with the + // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // If left blank, communications are not encrypted. + // Note: This field currently has no impact. + ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` + + // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) + // that the client verifies during a mutual TLS handshake with an + // server/endpoint for this BackendService. When the server presents its + // X.509 certificate to the client, the client inspects the + // certificate's subjectAltName field. If the field contains one of the + // specified values, the communication continues. Otherwise, it fails. + // This additional check enables the client to verify that the server is + // authorized to run the requested service. + // Note that the contents of the server certificate's subjectAltName + // field are configured by the Public Key Infrastructure which + // provisions server identities. + // Only applies to a global BackendService with loadBalancingScheme set + // to INTERNAL_SELF_MANAGED. Only applies when BackendService has an + // attached clientTlsPolicy with clientCertificate (mTLS mode). + // Note: This field currently has no impact. + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientTlsPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecuritySettings) MarshalJSON() ([]byte, error) { + type NoMethod SecuritySettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SerialPortOutput: An instance's serial console output. type SerialPortOutput struct { // Contents: [Output Only] The contents of the console output. @@ -33492,6 +34020,14 @@ type Snapshot struct { // created by applying resource policy on the target disk. AutoCreated bool `json:"autoCreated,omitempty"` + // ChainName: Creates the new snapshot in the snapshot chain labeled + // with the specified name. The chain name must be 1-63 characters long + // and comply with RFC1035. This is an uncommon option only for advanced + // service owners who needs to create separate snapshot chains, for + // example, for chargeback tracking. When you describe your snapshot + // resource, this field is visible only if it has a non-empty value. + ChainName string `json:"chainName,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -33568,8 +34104,7 @@ type Snapshot struct { // key and you do not need to provide a key to use the snapshot later. SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. + // SourceDisk: The source disk used to create this snapshot. SourceDisk string `json:"sourceDisk,omitempty"` // SourceDiskEncryptionKey: The customer-supplied encryption key of the @@ -33709,6 +34244,7 @@ type SnapshotListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -33717,6 +34253,7 @@ type SnapshotListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -33856,9 +34393,10 @@ func (s *SourceInstanceParams) MarshalJSON() ([]byte, error) { // certificates. (== resource_for {$api_version}.sslCertificates ==) (== // resource_for {$api_version}.regionSslCertificates ==) type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. + // Certificate: A value read into memory from a certificate file. The + // certificate file must be in PEM format. 
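The new Snapshot.ChainName field introduced above can be set on manual snapshot creation as well as through the scheduled-snapshot properties. A minimal sketch, not part of this patch; the project, zone, disk and snapshot names are placeholders, and an authenticated *compute.Service is assumed.

package main

import compute "google.golang.org/api/compute/v1"

func snapshotIntoChain(svc *compute.Service) error {
	snap := &compute.Snapshot{
		Name: "data-disk-2021-01-07",
		// Chain names must be 1-63 characters and RFC1035-compliant,
		// per the field comment above.
		ChainName: "data-disk-chain",
	}
	_, err := svc.Disks.CreateSnapshot("my-project", "us-central1-a", "data-disk", snap).Do()
	return err
}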
The certificate chain must be + // no greater than 5 certs long. The chain must include at least one + // intermediate cert. Certificate string `json:"certificate,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -33892,8 +34430,9 @@ type SslCertificate struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. + // PrivateKey: A value read into memory from a write-only private key + // file. The private key file must be in PEM format. For security, only + // insert requests include this field. PrivateKey string `json:"privateKey,omitempty"` // Region: [Output Only] URL of the region where the regional SSL @@ -33972,6 +34511,9 @@ type SslCertificateAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *SslCertificateAggregatedListWarning `json:"warning,omitempty"` @@ -34018,6 +34560,7 @@ type SslCertificateAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -34026,6 +34569,7 @@ type SslCertificateAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -34173,6 +34717,7 @@ type SslCertificateListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -34181,6 +34726,7 @@ type SslCertificateListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -34387,6 +34933,7 @@ type SslCertificatesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -34395,6 +34942,7 @@ type SslCertificatesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -34541,6 +35089,7 @@ type SslPoliciesListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -34549,6 +35098,7 @@ type SslPoliciesListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -34657,11 +35207,12 @@ func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, 
s.NullFields) } -// SslPolicy: Represents a Cloud Armor Security Policy resource. +// SslPolicy: Represents a Google Cloud Armor security policy +// resource. // // Only external backend services used by HTTP or HTTPS load balancers -// can reference a Security Policy. For more information, read read -// Cloud Armor Security Policy Concepts. (== resource_for +// can reference a security policy. For more information, see Google +// Cloud Armor security policy overview. (== resource_for // {$api_version}.sslPolicies ==) type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -34782,6 +35333,7 @@ type SslPolicyWarnings struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -34790,6 +35342,7 @@ type SslPolicyWarnings struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -35192,6 +35745,9 @@ type SubnetworkAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` @@ -35238,6 +35794,7 @@ type SubnetworkAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -35246,6 +35803,7 @@ type SubnetworkAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -35393,6 +35951,7 @@ type SubnetworkListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -35401,6 +35960,7 @@ type SubnetworkListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -35686,6 +36246,7 @@ type SubnetworksScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -35694,6 +36255,7 @@ type SubnetworksScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36075,6 +36637,7 @@ type TargetGrpcProxyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -36083,6 +36646,7 @@ type TargetGrpcProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" 
// "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36210,6 +36774,7 @@ type TargetHttpProxiesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -36218,6 +36783,7 @@ type TargetHttpProxiesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36326,6 +36892,15 @@ type TargetHttpProxy struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a TargetHttpProxy. An up-to-date + // fingerprint must be provided in order to patch/update the + // TargetHttpProxy; otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the TargetHttpProxy. + Fingerprint string `json:"fingerprint,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -36343,6 +36918,20 @@ type TargetHttpProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // ProxyBind: This field only applies when the forwarding rule that + // references this target proxy has a loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. + // + // When this field is set to true, Envoy proxies set up inbound traffic + // interception and bind to the IP address and port specified in the + // forwarding rule. This is generally useful when using Traffic Director + // to configure Envoy as a gateway or middle proxy (in other words, not + // a sidecar proxy). The Envoy proxy listens for inbound requests and + // handles requests when it receives them. + // + // The default is false. + ProxyBind bool `json:"proxyBind,omitempty"` + // Region: [Output Only] URL of the region where the regional Target // HTTP Proxy resides. This field is not applicable to global Target // HTTP Proxies. @@ -36407,6 +36996,9 @@ type TargetHttpProxyAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -36504,6 +37096,7 @@ type TargetHttpProxyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -36512,6 +37105,7 @@ type TargetHttpProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36639,6 +37233,7 @@ type TargetHttpsProxiesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -36647,6 +37242,7 @@ type TargetHttpsProxiesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -36809,6 +37405,18 @@ func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, err // {$api_version}.targetHttpsProxies ==) (== resource_for // {$api_version}.regionTargetHttpsProxies ==) type TargetHttpsProxy struct { + // AuthorizationPolicy: Optional. A URL referring to a + // networksecurity.AuthorizationPolicy resource that describes how the + // proxy should authorize inbound traffic. If left blank, access will + // not be restricted by an authorization policy. + // Refer to the AuthorizationPolicy resource for additional + // details. + // authorizationPolicy only applies to a global TargetHttpsProxy + // attached to globalForwardingRules with the loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. + // Note: This field currently has no impact. + AuthorizationPolicy string `json:"authorizationPolicy,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -36834,6 +37442,20 @@ type TargetHttpsProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // ProxyBind: This field only applies when the forwarding rule that + // references this target proxy has a loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. + // + // When this field is set to true, Envoy proxies set up inbound traffic + // interception and bind to the IP address and port specified in the + // forwarding rule. This is generally useful when using Traffic Director + // to configure Envoy as a gateway or middle proxy (in other words, not + // a sidecar proxy). The Envoy proxy listens for inbound requests and + // handles requests when it receives them. + // + // The default is false. + ProxyBind bool `json:"proxyBind,omitempty"` + // QuicOverride: Specifies the QUIC override policy for this // TargetHttpsProxy resource. This setting determines whether the load // balancer attempts to negotiate QUIC with clients. You can specify @@ -36861,6 +37483,16 @@ type TargetHttpsProxy struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ServerTlsPolicy: Optional. A URL referring to a + // networksecurity.ServerTlsPolicy resource that describes how the proxy + // should authenticate inbound traffic. 
+ // serverTlsPolicy only applies to a global TargetHttpsProxy attached to + // globalForwardingRules with the loadBalancingScheme set to + // INTERNAL_SELF_MANAGED. + // If left blank, communications are not encrypted. + // Note: This field currently has no impact. + ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` + // SslCertificates: URLs to SslCertificate resources that are used to // authenticate connections between users and the load balancer. At // least one SSL certificate must be specified. Currently, you may @@ -36885,7 +37517,7 @@ type TargetHttpsProxy struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "AuthorizationPolicy") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -36893,7 +37525,7 @@ type TargetHttpsProxy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "AuthorizationPolicy") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -36933,6 +37565,9 @@ type TargetHttpsProxyAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *TargetHttpsProxyAggregatedListWarning `json:"warning,omitempty"` @@ -36979,6 +37614,7 @@ type TargetHttpsProxyAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -36987,6 +37623,7 @@ type TargetHttpsProxyAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37135,6 +37772,7 @@ type TargetHttpsProxyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -37143,6 +37781,7 @@ type TargetHttpsProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37329,6 +37968,9 @@ type TargetInstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` @@ -37375,6 +38017,7 @@ type TargetInstanceAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -37383,6 +38026,7 @@ type TargetInstanceAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37530,6 +38174,7 @@ type TargetInstanceListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -37538,6 +38183,7 @@ type TargetInstanceListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37664,6 +38310,7 @@ type TargetInstancesScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -37672,6 +38319,7 @@ type TargetInstancesScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -37758,10 +38406,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { // more information, read Using target pools. (== resource_for // {$api_version}.targetPools ==) type TargetPool struct { - // BackupPool: This field is applicable only when the containing target - // pool is serving a forwarding rule as the primary pool, and its - // failoverRatio field is properly set to a value between [0, - // 1]. + // BackupPool: The server-defined URL for the resource. This field is + // applicable only when the containing target pool is serving a + // forwarding rule as the primary pool, and its failoverRatio field is + // properly set to a value between [0, 1]. // // backupPool and failoverRatio together define the fallback behavior of // the primary target pool: if the ratio of the healthy instances in the @@ -37923,6 +38571,9 @@ type TargetPoolAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` @@ -37969,6 +38620,7 @@ type TargetPoolAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -37977,6 +38629,7 @@ type TargetPoolAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38159,6 +38812,7 @@ type TargetPoolListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -38167,6 +38821,7 @@ type TargetPoolListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38411,6 +39066,7 @@ type TargetPoolsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -38419,6 +39075,7 @@ type TargetPoolsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -38769,6 +39426,7 @@ type TargetSslProxyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -38777,6 +39435,7 @@ type TargetSslProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39060,6 +39719,7 @@ type TargetTcpProxyListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -39068,6 +39728,7 @@ type TargetTcpProxyListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39260,6 +39921,9 @@ type TargetVpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` @@ -39306,6 +39970,7 @@ type TargetVpnGatewayAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -39314,6 +39979,7 @@ type TargetVpnGatewayAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39462,6 +40128,7 @@ type TargetVpnGatewayListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -39470,6 +40137,7 @@ type TargetVpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39597,6 +40265,7 @@ type TargetVpnGatewaysScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -39605,6 +40274,7 @@ type TargetVpnGatewaysScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -39684,12 +40354,18 @@ func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { } type TestFailure struct { + // ActualService: BackendService or BackendBucket returned by load + // balancer. ActualService string `json:"actualService,omitempty"` + // ExpectedService: Expected BackendService or BackendBucket resource + // the given URL should be mapped to. ExpectedService string `json:"expectedService,omitempty"` + // Host: Host portion of the URL. Host string `json:"host,omitempty"` + // Path: Path portion including query parameters in the URL. Path string `json:"path,omitempty"` // ForceSendFields is a list of field names (e.g. "ActualService") to @@ -39823,10 +40499,8 @@ type UrlMap struct { // set. // UrlMaps for external HTTP(S) load balancers support only the // urlRewrite action within defaultRouteAction. - // - // defaultRouteAction has no effect when the backend service is - // referenced by a URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. + // defaultRouteAction has no effect when the URL map is bound to target + // gRPC proxy that has validateForProxyless field set to true. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL of the defaultService @@ -39838,21 +40512,16 @@ type UrlMap struct { // Conversely, if routeAction specifies any weightedBackendServices, // service must not be specified. // Only one of defaultService, defaultUrlRedirect or - // defaultRouteAction.weightedBackendService must be - // set. - // - // defaultService has no effect when the backend service is referenced - // by a URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. 
+ // defaultRouteAction.weightedBackendService must be set. + // defaultService has no effect when the URL map is bound to target gRPC + // proxy that has validateForProxyless field set to true. DefaultService string `json:"defaultService,omitempty"` // DefaultUrlRedirect: When none of the specified hostRules match, the // request is redirected to a URL specified by defaultUrlRedirect. // If defaultUrlRedirect is specified, defaultService or // defaultRouteAction must not be set. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy. + // Not supported when the URL map is bound to target gRPC proxy. DefaultUrlRedirect *HttpRedirectAction `json:"defaultUrlRedirect,omitempty"` // Description: An optional description of this resource. Provide this @@ -39873,10 +40542,10 @@ type UrlMap struct { // need to take effect for the selected backendService. // The headerAction specified here take effect after headerAction // specified under pathMatcher. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy that has validateForProxyless - // field set to true. + // Note that headerAction is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // HostRules: The list of HostRules to use against the URL. @@ -39914,10 +40583,8 @@ type UrlMap struct { // Tests: The list of expected URL mapping tests. Request to update this // UrlMap will succeed only if all of the test cases pass. You can // specify a maximum of 100 tests per UrlMap. - // - // Not supported when the backend service is referenced by a URL map - // that is bound to target gRPC proxy that has validateForProxyless - // field set to true. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. Tests []*UrlMapTest `json:"tests,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -40016,6 +40683,7 @@ type UrlMapListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -40024,6 +40692,7 @@ type UrlMapListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40133,14 +40802,16 @@ type UrlMapTest struct { // Description: Description of this test case. Description string `json:"description,omitempty"` - // Host: Host portion of the URL. + // Host: Host portion of the URL. If headers contains a host header, + // then host must also match the header value. Host string `json:"host,omitempty"` // Path: Path portion of the URL. Path string `json:"path,omitempty"` - // Service: Expected BackendService resource the given URL should be - // mapped to. + // Service: Expected BackendService or BackendBucket resource the given + // URL should be mapped to. + // service cannot be set if expectedRedirectResponseCode is set. Service string `json:"service,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Description") to @@ -40227,6 +40898,9 @@ type UrlMapsAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. Warning *UrlMapsAggregatedListWarning `json:"warning,omitempty"` @@ -40273,6 +40947,7 @@ type UrlMapsAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -40281,6 +40956,7 @@ type UrlMapsAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40406,6 +41082,7 @@ type UrlMapsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -40414,6 +41091,7 @@ type UrlMapsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -40732,6 +41410,7 @@ type UsableSubnetworksAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -40740,6 +41419,7 @@ type UsableSubnetworksAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41022,6 +41702,7 @@ type VmEndpointNatMappingsListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -41030,6 +41711,7 @@ type VmEndpointNatMappingsListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41224,6 +41906,9 @@ type VpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *VpnGatewayAggregatedListWarning `json:"warning,omitempty"` @@ -41270,6 +41955,7 @@ type VpnGatewayAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -41278,6 +41964,7 @@ type VpnGatewayAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41425,6 +42112,7 @@ type VpnGatewayListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -41433,6 +42121,7 @@ type VpnGatewayListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -41777,6 +42466,7 @@ type VpnGatewaysScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -41785,6 +42475,7 @@ type VpnGatewaysScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42066,6 +42757,9 @@ type VpnTunnelAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` @@ -42112,6 +42806,7 @@ type VpnTunnelAggregatedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -42120,6 +42815,7 @@ type VpnTunnelAggregatedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42267,6 +42963,7 @@ type VpnTunnelListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -42275,6 +42972,7 @@ type VpnTunnelListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42400,6 +43098,7 @@ type VpnTunnelsScopedListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -42408,6 +43107,7 @@ type VpnTunnelsScopedListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42572,6 +43272,10 @@ type WeightedBackendService struct { // need to take effect for the selected backendService. // headerAction specified here take effect before headerAction in the // enclosing HttpRouteRule, PathMatcher and UrlMap. + // Note that headerAction is not supported for Loadbalancers that have + // their loadBalancingScheme set to EXTERNAL. + // Not supported when the URL map is bound to target gRPC proxy that has + // validateForProxyless field set to true. 
HeaderAction *HttpHeaderAction `json:"headerAction,omitempty"` // Weight: Specifies the fraction of traffic sent to backendService, @@ -42676,6 +43380,7 @@ type XpnHostListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -42684,6 +43389,7 @@ type XpnHostListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -42943,6 +43649,7 @@ type ZoneListWarning struct { // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "LARGE_DEPLOYMENT_WARNING" // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -42951,6 +43658,7 @@ type ZoneListWarning struct { // "NEXT_HOP_NOT_RUNNING" // "NOT_CRITICAL_ERROR" // "NO_RESULTS_ON_PAGE" + // "PARTIAL_SUCCESS" // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" @@ -43199,6 +43907,15 @@ func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *Accele return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -43236,7 +43953,7 @@ func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43339,6 +44056,11 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/acceleratorTypes", @@ -43434,7 +44156,7 @@ func (c *AcceleratorTypesGetCall) Header() http.Header { func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43628,6 +44350,15 @@ func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypes return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AcceleratorTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AcceleratorTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -43665,7 +44396,7 @@ func (c *AcceleratorTypesListCall) Header() http.Header { func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -43766,6 +44497,11 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -43905,6 +44641,15 @@ func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AddressesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -43942,7 +44687,7 @@ func (c *AddressesAggregatedListCall) Header() http.Header { func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44045,6 +44790,11 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/addresses", @@ -44149,7 +44899,7 @@ func (c *AddressesDeleteCall) Header() http.Header { func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44318,7 +45068,7 @@ func (c *AddressesGetCall) Header() http.Header { func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44495,7 +45245,7 @@ func (c *AddressesInsertCall) Header() http.Header { func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44690,6 +45440,15 @@ func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AddressesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -44727,7 +45486,7 @@ func (c *AddressesListCall) Header() http.Header { func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -44834,6 +45593,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/addresses", @@ -44966,6 +45730,15 @@ func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *Autoscalers return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -45003,7 +45776,7 @@ func (c *AutoscalersAggregatedListCall) Header() http.Header { func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45106,6 +45879,11 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/autoscalers", @@ -45209,7 +45987,7 @@ func (c *AutoscalersDeleteCall) Header() http.Header { func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45378,7 +46156,7 @@ func (c *AutoscalersGetCall) Header() http.Header { func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45554,7 +46332,7 @@ func (c *AutoscalersInsertCall) Header() http.Header { func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45748,6 +46526,15 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *AutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *AutoscalersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -45785,7 +46572,7 @@ func (c *AutoscalersListCall) Header() http.Header { func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -45886,6 +46673,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -46004,7 +46796,7 @@ func (c *AutoscalersPatchCall) Header() http.Header { func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46193,7 +46985,7 @@ func (c *AutoscalersUpdateCall) Header() http.Header { func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46375,7 +47167,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46547,7 +47339,7 @@ func (c *BackendBucketsDeleteCall) Header() http.Header { func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46714,7 +47506,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -46878,7 +47670,7 @@ func (c *BackendBucketsGetCall) Header() http.Header { func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47043,7 +47835,7 @@ func (c *BackendBucketsInsertCall) Header() http.Header { func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47226,6 +48018,15 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *BackendBucketsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendBucketsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -47263,7 +48064,7 @@ func (c *BackendBucketsListCall) Header() http.Header { func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47361,6 +48162,11 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/backendBuckets", @@ -47466,7 +48272,7 @@ func (c *BackendBucketsPatchCall) Header() http.Header { func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47642,7 +48448,7 @@ func (c *BackendBucketsUpdateCall) Header() http.Header { func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -47818,7 +48624,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48022,6 +48828,15 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -48059,7 +48874,7 @@ func (c *BackendServicesAggregatedListCall) Header() http.Header { func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48162,6 +48977,11 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/backendServices", @@ -48264,7 +49084,7 @@ func (c *BackendServicesDeleteCall) Header() http.Header { func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48431,7 +49251,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48596,7 +49416,7 @@ func (c *BackendServicesGetCall) Header() http.Header { func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48749,7 +49569,7 @@ func (c *BackendServicesGetHealthCall) Header() http.Header { func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -48920,7 +49740,7 @@ func (c *BackendServicesInsertCall) Header() http.Header { func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49104,6 +49924,15 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *BackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -49141,7 +49970,7 @@ func (c *BackendServicesListCall) Header() http.Header { func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49239,6 +50068,11 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/backendServices", @@ -49346,7 +50180,7 @@ func (c *BackendServicesPatchCall) Header() http.Header { func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49522,7 +50356,7 @@ func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49699,7 +50533,7 @@ func (c *BackendServicesUpdateCall) Header() http.Header { func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -49904,6 +50738,15 @@ func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DiskTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -49941,7 +50784,7 @@ func (c *DiskTypesAggregatedListCall) Header() http.Header { func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50044,6 +50887,11 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/diskTypes", @@ -50141,7 +50989,7 @@ func (c *DiskTypesGetCall) Header() http.Header { func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50336,6 +51184,15 @@ func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DiskTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -50373,7 +51230,7 @@ func (c *DiskTypesListCall) Header() http.Header { func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50474,6 +51331,11 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -50587,7 +51449,7 @@ func (c *DisksAddResourcePoliciesCall) Header() http.Header { func (c *DisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50801,6 +51663,15 @@ func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DisksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -50838,7 +51709,7 @@ func (c *DisksAggregatedListCall) Header() http.Header { func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -50941,6 +51812,11 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/disks", @@ -51002,7 +51878,7 @@ func (r *DisksService) CreateSnapshot(project string, zone string, disk string, } // GuestFlush sets the optional parameter "guestFlush": [Input Only] -// Specifies to create an application consistent snapshot by informing +// Whether to attempt an application consistent snapshot by informing // the OS to prepare for the snapshot process. Currently only supported // on Windows instances using the Volume Shadow Copy Service (VSS). 
func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { @@ -51056,7 +51932,7 @@ func (c *DisksCreateSnapshotCall) Header() http.Header { func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51139,7 +52015,7 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "guestFlush": { - // "description": "[Input Only] Specifies to create an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", + // "description": "[Input Only] Whether to attempt an application consistent snapshot by informing the OS to prepare for the snapshot process. Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS).", // "location": "query", // "type": "boolean" // }, @@ -51249,7 +52125,7 @@ func (c *DisksDeleteCall) Header() http.Header { func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51418,7 +52294,7 @@ func (c *DisksGetCall) Header() http.Header { func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51593,7 +52469,7 @@ func (c *DisksGetIamPolicyCall) Header() http.Header { func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51787,7 +52663,7 @@ func (c *DisksInsertCall) Header() http.Header { func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -51987,6 +52863,15 @@ func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *DisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *DisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
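The guestFlush rewording a few hunks above ("Whether to attempt an application consistent snapshot") only touches the generated comment; the call surface is unchanged. A hedged usage sketch, reusing the svc and ctx from the previous sketch with placeholder project, zone, disk, and snapshot names:

// svc and ctx are the *compute.Service and context.Context from the sketch above.
snap := &compute.Snapshot{Name: "disk-1-vss-snapshot"}
op, err := svc.Disks.CreateSnapshot("my-project", "us-central1-a", "disk-1", snap).
	GuestFlush(true). // ask the guest OS (Windows VSS only) to quiesce before the snapshot
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("started operation:", op.Name)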
@@ -52024,7 +52909,7 @@ func (c *DisksListCall) Header() http.Header { func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52125,6 +53010,11 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -52236,7 +53126,7 @@ func (c *DisksRemoveResourcePoliciesCall) Header() http.Header { func (c *DisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52423,7 +53313,7 @@ func (c *DisksResizeCall) Header() http.Header { func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52591,7 +53481,7 @@ func (c *DisksSetIamPolicyCall) Header() http.Header { func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52773,7 +53663,7 @@ func (c *DisksSetLabelsCall) Header() http.Header { func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -52941,7 +53831,7 @@ func (c *DisksTestIamPermissionsCall) Header() http.Header { func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53119,7 +54009,7 @@ func (c *ExternalVpnGatewaysDeleteCall) Header() http.Header { func (c *ExternalVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53277,7 +54167,7 @@ func (c *ExternalVpnGatewaysGetCall) Header() http.Header { func (c 
*ExternalVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53442,7 +54332,7 @@ func (c *ExternalVpnGatewaysInsertCall) Header() http.Header { func (c *ExternalVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53625,6 +54515,15 @@ func (c *ExternalVpnGatewaysListCall) PageToken(pageToken string) *ExternalVpnGa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ExternalVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ExternalVpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -53662,7 +54561,7 @@ func (c *ExternalVpnGatewaysListCall) Header() http.Header { func (c *ExternalVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53760,6 +54659,11 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/externalVpnGateways", @@ -53845,7 +54749,7 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Header() http.Header { func (c *ExternalVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -53997,7 +54901,7 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *ExternalVpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54167,7 +55071,7 @@ func (c *FirewallsDeleteCall) Header() http.Header { func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54325,7 +55229,7 @@ func (c *FirewallsGetCall) Header() http.Header { func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54491,7 +55395,7 @@ func (c *FirewallsInsertCall) Header() http.Header { func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54675,6 +55579,15 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *FirewallsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FirewallsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -54712,7 +55625,7 @@ func (c *FirewallsListCall) Header() http.Header { func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -54810,6 +55723,11 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/firewalls", @@ -54916,7 +55834,7 @@ func (c *FirewallsPatchCall) Header() http.Header { func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55095,7 +56013,7 @@ func (c *FirewallsUpdateCall) Header() http.Header { func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55300,6 +56218,15 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ForwardingRulesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -55337,7 +56264,7 @@ func (c *ForwardingRulesAggregatedListCall) Header() http.Header { func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55440,6 +56367,11 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/forwardingRules", @@ -55544,7 +56476,7 @@ func (c *ForwardingRulesDeleteCall) Header() http.Header { func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55713,7 +56645,7 @@ func (c *ForwardingRulesGetCall) Header() http.Header { func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -55890,7 +56822,7 @@ func (c *ForwardingRulesInsertCall) Header() http.Header { func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56085,6 +57017,15 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ForwardingRulesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -56122,7 +57063,7 @@ func (c *ForwardingRulesListCall) Header() http.Header { func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56229,6 +57170,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/forwardingRules", @@ -56337,7 +57283,7 @@ func (c *ForwardingRulesPatchCall) Header() http.Header { func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56525,7 +57471,7 @@ func (c *ForwardingRulesSetTargetCall) Header() http.Header { func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56708,7 +57654,7 @@ func (c *GlobalAddressesDeleteCall) Header() http.Header { func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -56867,7 +57813,7 @@ func (c *GlobalAddressesGetCall) Header() http.Header { func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57033,7 +57979,7 @@ func (c *GlobalAddressesInsertCall) Header() http.Header { func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57216,6 +58162,15 @@ func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalAddressesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalAddressesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -57253,7 +58208,7 @@ func (c *GlobalAddressesListCall) Header() http.Header { func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57351,6 +58306,11 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/addresses", @@ -57453,7 +58413,7 @@ func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57612,7 +58572,7 @@ func (c *GlobalForwardingRulesGetCall) Header() http.Header { func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57778,7 +58738,7 @@ func (c *GlobalForwardingRulesInsertCall) Header() http.Header { func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -57962,6 +58922,15 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalForwardingRulesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalForwardingRulesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -57999,7 +58968,7 @@ func (c *GlobalForwardingRulesListCall) Header() http.Header { func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58097,6 +59066,11 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/forwardingRules", @@ -58203,7 +59177,7 @@ func (c *GlobalForwardingRulesPatchCall) Header() http.Header { func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58380,7 +59354,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58556,7 +59530,7 @@ func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.He func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58729,7 +59703,7 @@ func (c *GlobalNetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -58896,7 +59870,7 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.He func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59061,7 +60035,7 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59225,7 +60199,7 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59408,6 +60382,15 @@ func (c *GlobalNetworkEndpointGroupsListCall) PageToken(pageToken string) *Globa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59445,7 +60428,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Header() http.Header { func (c *GlobalNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59543,6 +60526,11 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/networkEndpointGroups", @@ -59664,6 +60652,15 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -59691,7 +60688,7 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Head func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -59796,6 +60793,11 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", @@ -59929,6 +60931,15 @@ func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *Global return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalOperationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59966,7 +60977,7 @@ func (c *GlobalOperationsAggregatedListCall) Header() http.Header { func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60069,6 +61080,11 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/operations", @@ -60152,7 +61168,7 @@ func (c *GlobalOperationsDeleteCall) Header() http.Header { func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60278,7 +61294,7 @@ func (c *GlobalOperationsGetCall) Header() http.Header { func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60462,6 +61478,15 @@ func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperations return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *GlobalOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *GlobalOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60499,7 +61524,7 @@ func (c *GlobalOperationsListCall) Header() http.Header { func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60597,6 +61622,11 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/operations", @@ -60692,7 +61722,7 @@ func (c *GlobalOperationsWaitCall) Header() http.Header { func (c *GlobalOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -60885,6 +61915,15 @@ func (c *HealthChecksAggregatedListCall) PageToken(pageToken string) *HealthChec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60922,7 +61961,7 @@ func (c *HealthChecksAggregatedListCall) Header() http.Header { func (c *HealthChecksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61025,6 +62064,11 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/healthChecks", @@ -61126,7 +62170,7 @@ func (c *HealthChecksDeleteCall) Header() http.Header { func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61284,7 +62328,7 @@ func (c *HealthChecksGetCall) Header() http.Header { func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61449,7 +62493,7 @@ func (c *HealthChecksInsertCall) Header() http.Header { func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61632,6 +62676,15 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *HealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
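The same opt-in is most useful on the *AggregatedList calls, where a single unreachable scope previously failed the entire request. A sketch of how a caller might inspect per-scope warnings after this change; the Items and Warning field shapes are assumed from the generated aggregated-list types, and the surrounding code is illustrative only:

// svc and ctx as in the earlier sketches.
agg, err := svc.HealthChecks.AggregatedList("my-project").
	ReturnPartialSuccess(true).
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
for scope, scoped := range agg.Items {
	if scoped.Warning != nil {
		// With partial success enabled, a failed scope is reported as a
		// warning rather than failing the whole aggregated call.
		fmt.Printf("%s: %s\n", scope, scoped.Warning.Message)
		continue
	}
	fmt.Printf("%s: %d health checks\n", scope, len(scoped.HealthChecks))
}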
@@ -61669,7 +62722,7 @@ func (c *HealthChecksListCall) Header() http.Header { func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -61767,6 +62820,11 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/healthChecks", @@ -61872,7 +62930,7 @@ func (c *HealthChecksPatchCall) Header() http.Header { func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62048,7 +63106,7 @@ func (c *HealthChecksUpdateCall) Header() http.Header { func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62222,7 +63280,7 @@ func (c *HttpHealthChecksDeleteCall) Header() http.Header { func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62381,7 +63439,7 @@ func (c *HttpHealthChecksGetCall) Header() http.Header { func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62547,7 +63605,7 @@ func (c *HttpHealthChecksInsertCall) Header() http.Header { func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62731,6 +63789,15 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *HttpHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -62768,7 +63835,7 @@ func (c *HttpHealthChecksListCall) Header() http.Header { func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -62866,6 +63933,11 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/httpHealthChecks", @@ -62972,7 +64044,7 @@ func (c *HttpHealthChecksPatchCall) Header() http.Header { func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63149,7 +64221,7 @@ func (c *HttpHealthChecksUpdateCall) Header() http.Header { func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63322,7 +64394,7 @@ func (c *HttpsHealthChecksDeleteCall) Header() http.Header { func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63480,7 +64552,7 @@ func (c *HttpsHealthChecksGetCall) Header() http.Header { func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63645,7 +64717,7 @@ func (c *HttpsHealthChecksInsertCall) Header() http.Header { func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63828,6 +64900,15 @@ func (c 
*HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *HttpsHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *HttpsHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -63865,7 +64946,7 @@ func (c *HttpsHealthChecksListCall) Header() http.Header { func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -63963,6 +65044,11 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/httpsHealthChecks", @@ -64068,7 +65154,7 @@ func (c *HttpsHealthChecksPatchCall) Header() http.Header { func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64244,7 +65330,7 @@ func (c *HttpsHealthChecksUpdateCall) Header() http.Header { func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64418,7 +65504,7 @@ func (c *ImagesDeleteCall) Header() http.Header { func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64589,7 +65675,7 @@ func (c *ImagesDeprecateCall) Header() http.Header { func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64756,7 +65842,7 @@ func (c *ImagesGetCall) Header() http.Header { func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -64913,7 +65999,7 @@ func (c *ImagesGetFromFamilyCall) Header() http.Header { func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65077,7 +66163,7 @@ func (c *ImagesGetIamPolicyCall) Header() http.Header { func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65256,7 +66342,7 @@ func (c *ImagesInsertCall) Header() http.Header { func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65453,6 +66539,15 @@ func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ImagesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ImagesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -65490,7 +66585,7 @@ func (c *ImagesListCall) Header() http.Header { func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65588,6 +66683,11 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/images", @@ -65624,6 +66724,183 @@ func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) er } } +// method id "compute.images.patch": + +type ImagesPatchCall struct { + s *Service + project string + image string + image2 *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified image with the data included in the +// request. Only the following fields can be modified: family, +// description, deprecation status. +func (r *ImagesService) Patch(project string, image string, image2 *Image) *ImagesPatchCall { + c := &ImagesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.image2 = image2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesPatchCall) RequestId(requestId string) *ImagesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesPatchCall) Fields(s ...googleapi.Field) *ImagesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesPatchCall) Context(ctx context.Context) *ImagesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified image with the data included in the request. Only the following fields can be modified: family, description, deprecation status.", + // "httpMethod": "PATCH", + // "id": "compute.images.patch", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "request": { + // "$ref": "Image" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.images.setIamPolicy": type ImagesSetIamPolicyCall struct { @@ -65673,7 +66950,7 @@ func (c *ImagesSetIamPolicyCall) Header() http.Header { func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65825,7 +67102,7 @@ func (c *ImagesSetLabelsCall) Header() http.Header { func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -65977,7 +67254,7 @@ func (c *ImagesTestIamPermissionsCall) Header() http.Header { func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66166,7 +67443,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66378,6 +67655,15 @@ func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *I return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
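The hunks above introduce a client surface for the new compute.images.patch method (ImagesService.Patch / ImagesPatchCall with an optional RequestId and an Operation result). A minimal usage sketch, assuming this vendored file is the v1 compute client (google.golang.org/api/compute/v1) and that Application Default Credentials are available; the project ID, image name, and request ID below are placeholders, not values from this change:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials by default.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	// Only family, description, and deprecation status may be patched,
	// per the method description in the hunk above.
	patch := &compute.Image{
		Description: "patched via compute.images.patch",
		Family:      "my-image-family", // placeholder family name
	}

	op, err := svc.Images.Patch("my-project", "my-image", patch).
		RequestId("00000000-0000-4000-8000-000000000001"). // optional idempotency key (placeholder UUID)
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("Images.Patch: %v", err)
	}
	fmt.Printf("operation %s: %s\n", op.Name, op.Status)
}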
@@ -66415,7 +67701,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66519,6 +67805,11 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/instanceGroupManagers", @@ -66607,7 +67898,7 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66790,7 +68081,7 @@ func (c *InstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -66975,7 +68266,7 @@ func (c *InstanceGroupManagersDeleteCall) Header() http.Header { func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67166,7 +68457,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67332,7 +68623,7 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67503,7 +68794,7 @@ func (c *InstanceGroupManagersGetCall) Header() http.Header { func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67685,7 +68976,7 @@ func (c *InstanceGroupManagersInsertCall) Header() http.Header { func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -67878,6 +69169,15 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -67915,7 +69215,7 @@ func (c *InstanceGroupManagersListCall) Header() http.Header { func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68016,6 +69316,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -68146,6 +69451,15 @@ func (c *InstanceGroupManagersListErrorsCall) PageToken(pageToken string) *Insta return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -68183,7 +69497,7 @@ func (c *InstanceGroupManagersListErrorsCall) Header() http.Header { func (c *InstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68293,6 +69607,11 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", @@ -68426,6 +69745,15 @@ func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken stri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -68453,7 +69781,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68561,6 +69889,11 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -68690,6 +70023,15 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken st return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -68717,7 +70059,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -68825,6 +70167,11 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", @@ -68941,7 +70288,7 @@ func (c *InstanceGroupManagersPatchCall) Header() http.Header { func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69127,7 +70474,7 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69325,7 +70672,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69528,7 +70875,7 @@ func (c *InstanceGroupManagersResizeCall) Header() http.Header { func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69714,7 +71061,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -69904,7 +71251,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70090,7 +71437,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70276,7 +71623,7 @@ func (c *InstanceGroupsAddInstancesCall) Header() http.Header { func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70488,6 +71835,15 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -70525,7 +71881,7 @@ func (c *InstanceGroupsAggregatedListCall) Header() http.Header { func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70628,6 +71984,11 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/instanceGroups", @@ -70734,7 +72095,7 @@ func (c *InstanceGroupsDeleteCall) Header() http.Header { func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70854,8 +72215,11 @@ type InstanceGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified instance group. Gets a list of available -// instance groups by making a list() request. 
+// Get: Returns the specified zonal instance group. Get a list of +// available zonal instance groups by making a list() request. +// +// For managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -70901,7 +72265,7 @@ func (c *InstanceGroupsGetCall) Header() http.Header { func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -70965,7 +72329,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Gets a list of available instance groups by making a list() request.", + // "description": "Returns the specified zonal instance group. Get a list of available zonal instance groups by making a list() request.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", // "httpMethod": "GET", // "id": "compute.instanceGroups.get", // "parameterOrder": [ @@ -71075,7 +72439,7 @@ func (c *InstanceGroupsInsertCall) Header() http.Header { func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71194,8 +72558,11 @@ type InstanceGroupsListCall struct { header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. +// List: Retrieves the list of zonal instance group resources contained +// within the specified zone. +// +// For managed instance groups, use the instanceGroupManagers or +// regionInstanceGroupManagers methods instead. func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -71268,6 +72635,15 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -71305,7 +72681,7 @@ func (c *InstanceGroupsListCall) Header() http.Header { func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71368,7 +72744,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "description": "Retrieves the list of zonal instance group resources contained within the specified zone.\n\nFor managed instance groups, use the instanceGroupManagers or regionInstanceGroupManagers methods instead.", // "httpMethod": "GET", // "id": "compute.instanceGroups.list", // "parameterOrder": [ @@ -71406,6 +72782,11 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -71536,6 +72917,15 @@ func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceG return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -71563,7 +72953,7 @@ func (c *InstanceGroupsListInstancesCall) Header() http.Header { func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71674,6 +73064,11 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -71792,7 +73187,7 @@ func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -71976,7 +73371,7 @@ func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72159,7 +73554,7 @@ func (c *InstanceTemplatesDeleteCall) Header() http.Header { func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72318,7 +73713,7 @@ func (c *InstanceTemplatesGetCall) Header() http.Header { func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72482,7 +73877,7 @@ func (c *InstanceTemplatesGetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72657,7 +74052,7 @@ func (c *InstanceTemplatesInsertCall) Header() http.Header { func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72841,6 +74236,15 @@ func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -72878,7 +74282,7 @@ func (c *InstanceTemplatesListCall) Header() http.Header { func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -72976,6 +74380,11 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/instanceTemplates", @@ -73061,7 +74470,7 @@ func (c *InstanceTemplatesSetIamPolicyCall) Header() http.Header { func (c *InstanceTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73213,7 +74622,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73389,7 +74798,7 @@ func (c *InstancesAddAccessConfigCall) Header() http.Header { func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73584,7 +74993,7 @@ func (c *InstancesAddResourcePoliciesCall) Header() http.Header { func (c *InstancesAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73799,6 +75208,15 @@ func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
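The repeated ReturnPartialSuccess setters added throughout these hunks expose the new returnPartialSuccess query parameter on the list and aggregated-list calls. A sketch of how a caller might opt in, using the aggregated instance list where partial success matters most (results from healthy scopes are kept if some zones fail); this assumes the v1 compute client and Application Default Credentials, and "my-project" is a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("compute.NewService: %v", err)
	}

	call := svc.Instances.AggregatedList("my-project").
		ReturnPartialSuccess(true). // opt in to partial results on scope failures
		Context(ctx)

	// Pages walks every page of the aggregated response.
	err = call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, inst := range scoped.Instances {
				fmt.Printf("%s\t%s\n", scope, inst.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatalf("Instances.AggregatedList: %v", err)
	}
}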
@@ -73836,7 +75254,7 @@ func (c *InstancesAggregatedListCall) Header() http.Header { func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -73939,6 +75357,11 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/instances", @@ -74004,7 +75427,8 @@ func (r *InstancesService) AttachDisk(project string, zone string, instance stri // ForceAttach sets the optional parameter "forceAttach": Whether to // force attach the regional disk even if it's currently attached to -// another instance. +// another instance. If you try to force attach a zonal disk to an +// instance, you will receive an error. func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) return c @@ -74056,7 +75480,7 @@ func (c *InstancesAttachDiskCall) Header() http.Header { func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74132,7 +75556,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "forceAttach": { - // "description": "Whether to force attach the regional disk even if it's currently attached to another instance.", + // "description": "Whether to force attach the regional disk even if it's currently attached to another instance. 
If you try to force attach a zonal disk to an instance, you will receive an error.", // "location": "query", // "type": "boolean" // }, @@ -74247,7 +75671,7 @@ func (c *InstancesDeleteCall) Header() http.Header { func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74427,7 +75851,7 @@ func (c *InstancesDeleteAccessConfigCall) Header() http.Header { func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74619,7 +76043,7 @@ func (c *InstancesDetachDiskCall) Header() http.Header { func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74796,7 +76220,7 @@ func (c *InstancesGetCall) Header() http.Header { func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -74977,7 +76401,7 @@ func (c *InstancesGetGuestAttributesCall) Header() http.Header { func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75162,7 +76586,7 @@ func (c *InstancesGetIamPolicyCall) Header() http.Header { func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75335,7 +76759,7 @@ func (c *InstancesGetScreenshotCall) Header() http.Header { func (c *InstancesGetScreenshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75533,7 +76957,7 @@ func (c *InstancesGetSerialPortOutputCall) Header() http.Header { func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75716,7 
+77140,7 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Header() http.Header { func (c *InstancesGetShieldedInstanceIdentityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -75908,7 +77332,7 @@ func (c *InstancesInsertCall) Header() http.Header { func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76108,6 +77532,15 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -76145,7 +77578,7 @@ func (c *InstancesListCall) Header() http.Header { func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76246,6 +77679,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -76303,9 +77741,9 @@ type InstancesListReferrersCall struct { // ListReferrers: Retrieves a list of resources that refer to the VM // instance specified in the request. For example, if the VM instance is -// part of a managed instance group, the referrers list includes the -// managed instance group. For more information, read Viewing Referrers -// to VM Instances. +// part of a managed or unmanaged instance group, the referrers list +// includes the instance group. For more information, read Viewing +// referrers to VM instances. func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76379,6 +77817,15 @@ func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListR return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *InstancesListReferrersCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstancesListReferrersCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -76416,7 +77863,7 @@ func (c *InstancesListReferrersCall) Header() http.Header { func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76480,7 +77927,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed instance group, the referrers list includes the managed instance group. For more information, read Viewing Referrers to VM Instances.", + // "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed or unmanaged instance group, the referrers list includes the instance group. For more information, read Viewing referrers to VM instances.", // "httpMethod": "GET", // "id": "compute.instances.listReferrers", // "parameterOrder": [ @@ -76526,6 +77973,11 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -76637,7 +78089,7 @@ func (c *InstancesRemoveResourcePoliciesCall) Header() http.Header { func (c *InstancesRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -76824,7 +78276,7 @@ func (c *InstancesResetCall) Header() http.Header { func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77007,7 +78459,7 @@ func (c *InstancesSetDeletionProtectionCall) Header() http.Header { func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77193,7 +78645,7 @@ func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77368,7 +78820,7 @@ func (c *InstancesSetIamPolicyCall) Header() http.Header { func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77550,7 +79002,7 @@ func (c *InstancesSetLabelsCall) Header() http.Header { func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77737,7 +79189,7 @@ func (c *InstancesSetMachineResourcesCall) Header() http.Header { func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -77924,7 +79376,7 @@ func (c *InstancesSetMachineTypeCall) Header() http.Header { func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78112,7 +79564,7 @@ func (c *InstancesSetMetadataCall) Header() http.Header { func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78301,7 +79753,7 @@ func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78491,7 +79943,7 @@ func (c *InstancesSetSchedulingCall) Header() http.Header { func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78679,7 +80131,7 @@ func (c *InstancesSetServiceAccountCall) Header() http.Header { func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -78868,7 +80320,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79056,7 +80508,7 @@ func (c *InstancesSetTagsCall) Header() http.Header { func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79222,7 +80674,7 @@ func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79395,7 +80847,7 @@ func (c *InstancesStartCall) Header() http.Header { func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for 
k, v := range c.header_ { reqHeaders[k] = v } @@ -79575,7 +81027,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79765,7 +81217,7 @@ func (c *InstancesStopCall) Header() http.Header { func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -79925,7 +81377,7 @@ func (c *InstancesTestIamPermissionsCall) Header() http.Header { func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80142,7 +81594,7 @@ func (c *InstancesUpdateCall) Header() http.Header { func (c *InstancesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80366,7 +81818,7 @@ func (c *InstancesUpdateAccessConfigCall) Header() http.Header { func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80562,7 +82014,7 @@ func (c *InstancesUpdateDisplayDeviceCall) Header() http.Header { func (c *InstancesUpdateDisplayDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80750,7 +82202,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -80946,7 +82398,7 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Header() http.Header { func (c *InstancesUpdateShieldedInstanceConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81160,6 +82612,15 @@ 
func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -81197,7 +82658,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81301,6 +82762,11 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/interconnectAttachments", @@ -81404,7 +82870,7 @@ func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81572,7 +83038,7 @@ func (c *InterconnectAttachmentsGetCall) Header() http.Header { func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81755,7 +83221,7 @@ func (c *InterconnectAttachmentsInsertCall) Header() http.Header { func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -81954,6 +83420,15 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *InterconnectAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -81991,7 +83466,7 @@ func (c *InterconnectAttachmentsListCall) Header() http.Header { func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82098,6 +83573,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/interconnectAttachments", @@ -82205,7 +83685,7 @@ func (c *InterconnectAttachmentsPatchCall) Header() http.Header { func (c *InterconnectAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82381,7 +83861,7 @@ func (c *InterconnectLocationsGetCall) Header() http.Header { func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82564,6 +84044,15 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectLocationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
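
Editorial note, not part of the patch: the hunks above vendor a regenerated compute/v1 client in which every list and aggregated-list call gains a generated ReturnPartialSuccess setter. As a rough, illustrative sketch only (assuming Application Default Credentials and a placeholder project ID; Loki itself does not call this API directly), a downstream caller could opt in like this:

// Illustrative sketch only, not part of this patch: opting in to partial
// results on an aggregated list with the regenerated compute/v1 client.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// returnPartialSuccess=true asks the API to return the scopes it could
	// list instead of failing the whole aggregated request.
	// "my-project" is a placeholder.
	call := svc.InterconnectAttachments.AggregatedList("my-project").
		ReturnPartialSuccess(true).
		MaxResults(100)

	err = call.Pages(ctx, func(page *compute.InterconnectAttachmentAggregatedList) error {
		for scope, scoped := range page.Items {
			fmt.Printf("%s: %d attachments\n", scope, len(scoped.InterconnectAttachments))
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
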
@@ -82601,7 +84090,7 @@ func (c *InterconnectLocationsListCall) Header() http.Header { func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82699,6 +84188,11 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/interconnectLocations", @@ -82800,7 +84294,7 @@ func (c *InterconnectsDeleteCall) Header() http.Header { func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -82958,7 +84452,7 @@ func (c *InterconnectsGetCall) Header() http.Header { func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83115,7 +84609,7 @@ func (c *InterconnectsGetDiagnosticsCall) Header() http.Header { func (c *InterconnectsGetDiagnosticsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83281,7 +84775,7 @@ func (c *InterconnectsInsertCall) Header() http.Header { func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83464,6 +84958,15 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *InterconnectsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -83501,7 +85004,7 @@ func (c *InterconnectsListCall) Header() http.Header { func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83599,6 +85102,11 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/interconnects", @@ -83704,7 +85212,7 @@ func (c *InterconnectsPatchCall) Header() http.Header { func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -83872,7 +85380,7 @@ func (c *LicenseCodesGetCall) Header() http.Header { func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84021,7 +85529,7 @@ func (c *LicenseCodesTestIamPermissionsCall) Header() http.Header { func (c *LicenseCodesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84192,7 +85700,7 @@ func (c *LicensesDeleteCall) Header() http.Header { func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84352,7 +85860,7 @@ func (c *LicensesGetCall) Header() http.Header { func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84518,7 +86026,7 @@ func (c *LicensesGetIamPolicyCall) Header() http.Header { func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84690,7 
+86198,7 @@ func (c *LicensesInsertCall) Header() http.Header { func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -84882,6 +86390,15 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *LicensesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *LicensesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -84919,7 +86436,7 @@ func (c *LicensesListCall) Header() http.Header { func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85017,6 +86534,11 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/licenses", @@ -85104,7 +86626,7 @@ func (c *LicensesSetIamPolicyCall) Header() http.Header { func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85257,7 +86779,7 @@ func (c *LicensesTestIamPermissionsCall) Header() http.Header { func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85458,6 +86980,15 @@ func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTyp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -85495,7 +87026,7 @@ func (c *MachineTypesAggregatedListCall) Header() http.Header { func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85598,6 +87129,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/machineTypes", @@ -85695,7 +87231,7 @@ func (c *MachineTypesGetCall) Header() http.Header { func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -85890,6 +87426,15 @@ func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *MachineTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *MachineTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -85927,7 +87472,7 @@ func (c *MachineTypesListCall) Header() http.Header { func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86028,6 +87573,11 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -86167,6 +87717,15 @@ func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *N return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -86204,7 +87763,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86308,6 +87867,11 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/networkEndpointGroups", @@ -86414,7 +87978,7 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86599,7 +88163,7 @@ func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86776,7 +88340,7 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -86951,7 +88515,7 @@ func (c *NetworkEndpointGroupsGetCall) Header() http.Header { func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87125,7 +88689,7 @@ func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87318,6 +88882,15 @@ func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
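
Editorial note, not part of the patch: the zonal list calls in these hunks (here NetworkEndpointGroupsListCall) gain the same generated setter. A second illustrative sketch, again with placeholder project and zone names and Application Default Credentials assumed, paging manually via PageToken rather than the Pages helper:

// Illustrative sketch only, not part of this patch: the same opt-in on a
// zonal list call, iterating pages by hand.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" and "us-central1-a" are placeholders.
	pageToken := ""
	for {
		resp, err := svc.NetworkEndpointGroups.List("my-project", "us-central1-a").
			ReturnPartialSuccess(true).
			MaxResults(100).
			PageToken(pageToken).
			Context(ctx).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, neg := range resp.Items {
			fmt.Println(neg.Name)
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}
}
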
@@ -87355,7 +88928,7 @@ func (c *NetworkEndpointGroupsListCall) Header() http.Header { func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87456,6 +89029,11 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", // "location": "path", @@ -87586,6 +89164,15 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken stri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -87613,7 +89200,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -87726,6 +89313,11 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", @@ -87821,7 +89413,7 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88001,7 +89593,7 @@ func (c *NetworksAddPeeringCall) Header() http.Header { func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88175,7 +89767,7 @@ func (c *NetworksDeleteCall) Header() http.Header { func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88334,7 +89926,7 @@ func (c *NetworksGetCall) Header() http.Header { func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88500,7 +90092,7 @@ func (c *NetworksInsertCall) Header() http.Header { func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88684,6 +90276,15 @@ func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
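
Editorial note, not part of the patch: the generated "Fields allows partial responses" comment repeated in these hunks refers to a different mechanism than returnPartialSuccess. Fields trims which JSON fields a successful response carries, while returnPartialSuccess tolerates per-scope failures; the two can be combined. A brief illustrative sketch (placeholder project ID, Application Default Credentials assumed):

// Illustrative sketch only, not part of this patch: combining a Fields
// selector (partial response body) with returnPartialSuccess (tolerate
// per-scope failures) on a networks list.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder. The Fields selectors keep only each
	// network's name and self link, plus the page token, in the response.
	resp, err := svc.Networks.List("my-project").
		ReturnPartialSuccess(true).
		Fields("items(name,selfLink)", "nextPageToken").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range resp.Items {
		fmt.Println(n.Name, n.SelfLink)
	}
}
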
@@ -88721,7 +90322,7 @@ func (c *NetworksListCall) Header() http.Header { func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -88819,6 +90420,11 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/networks", @@ -88967,6 +90573,15 @@ func (c *NetworksListPeeringRoutesCall) Region(region string) *NetworksListPeeri return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NetworksListPeeringRoutesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworksListPeeringRoutesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -89004,7 +90619,7 @@ func (c *NetworksListPeeringRoutesCall) Header() http.Header { func (c *NetworksListPeeringRoutesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89134,6 +90749,11 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "description": "The region of the request. The response will include all subnet routes, static routes and dynamic routes in the region.", // "location": "query", // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/networks/{network}/listPeeringRoutes", @@ -89239,7 +90859,7 @@ func (c *NetworksPatchCall) Header() http.Header { func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89414,7 +91034,7 @@ func (c *NetworksRemovePeeringCall) Header() http.Header { func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89588,7 +91208,7 @@ func (c *NetworksSwitchToCustomModeCall) Header() http.Header { func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89758,7 +91378,7 @@ func (c *NetworksUpdatePeeringCall) Header() http.Header { func (c *NetworksUpdatePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -89935,7 +91555,7 @@ func (c *NodeGroupsAddNodesCall) Header() http.Header { func (c *NodeGroupsAddNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90149,6 +91769,15 @@ func (c *NodeGroupsAggregatedListCall) PageToken(pageToken string) *NodeGroupsAg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -90186,7 +91815,7 @@ func (c *NodeGroupsAggregatedListCall) Header() http.Header { func (c *NodeGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90289,6 +91918,11 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/nodeGroups", @@ -90392,7 +92026,7 @@ func (c *NodeGroupsDeleteCall) Header() http.Header { func (c *NodeGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90570,7 +92204,7 @@ func (c *NodeGroupsDeleteNodesCall) Header() http.Header { func (c *NodeGroupsDeleteNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90748,7 +92382,7 @@ func (c *NodeGroupsGetCall) Header() http.Header { func (c *NodeGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -90923,7 +92557,7 @@ func (c *NodeGroupsGetIamPolicyCall) Header() http.Header { func (c *NodeGroupsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91106,7 +92740,7 @@ func (c *NodeGroupsInsertCall) Header() http.Header { func (c *NodeGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91309,6 +92943,15 @@ func (c *NodeGroupsListCall) PageToken(pageToken string) *NodeGroupsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *NodeGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -91346,7 +92989,7 @@ func (c *NodeGroupsListCall) Header() http.Header { func (c *NodeGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91447,6 +93090,11 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -91575,6 +93223,15 @@ func (c *NodeGroupsListNodesCall) PageToken(pageToken string) *NodeGroupsListNod return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeGroupsListNodesCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeGroupsListNodesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -91602,7 +93259,7 @@ func (c *NodeGroupsListNodesCall) Header() http.Header { func (c *NodeGroupsListNodesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91709,6 +93366,11 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -91820,7 +93482,7 @@ func (c *NodeGroupsPatchCall) Header() http.Header { func (c *NodeGroupsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -91988,7 +93650,7 @@ func (c *NodeGroupsSetIamPolicyCall) Header() http.Header { func (c *NodeGroupsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92169,7 +93831,7 @@ func (c *NodeGroupsSetNodeTemplateCall) Header() http.Header { func (c *NodeGroupsSetNodeTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92337,7 +93999,7 @@ func (c *NodeGroupsTestIamPermissionsCall) Header() http.Header { func (c *NodeGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92546,6 +94208,15 @@ func (c *NodeTemplatesAggregatedListCall) PageToken(pageToken string) *NodeTempl return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -92583,7 +94254,7 @@ func (c *NodeTemplatesAggregatedListCall) Header() http.Header { func (c *NodeTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92686,6 +94357,11 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/nodeTemplates", @@ -92789,7 +94465,7 @@ func (c *NodeTemplatesDeleteCall) Header() http.Header { func (c *NodeTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -92958,7 +94634,7 @@ func (c *NodeTemplatesGetCall) Header() http.Header { func (c *NodeTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93133,7 +94809,7 @@ func (c *NodeTemplatesGetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93315,7 +94991,7 @@ func (c *NodeTemplatesInsertCall) Header() http.Header { func (c *NodeTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93509,6 +95185,15 @@ func (c *NodeTemplatesListCall) PageToken(pageToken string) *NodeTemplatesListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTemplatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -93546,7 +95231,7 @@ func (c *NodeTemplatesListCall) Header() http.Header { func (c *NodeTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93653,6 +95338,11 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/nodeTemplates", @@ -93740,7 +95430,7 @@ func (c *NodeTemplatesSetIamPolicyCall) Header() http.Header { func (c *NodeTemplatesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -93903,7 +95593,7 @@ func (c *NodeTemplatesTestIamPermissionsCall) Header() http.Header { func (c *NodeTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94112,6 +95802,15 @@ func (c *NodeTypesAggregatedListCall) PageToken(pageToken string) *NodeTypesAggr return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -94149,7 +95848,7 @@ func (c *NodeTypesAggregatedListCall) Header() http.Header { func (c *NodeTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94252,6 +95951,11 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/nodeTypes", @@ -94348,7 +96052,7 @@ func (c *NodeTypesGetCall) Header() http.Header { func (c *NodeTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94542,6 +96246,15 @@ func (c *NodeTypesListCall) PageToken(pageToken string) *NodeTypesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *NodeTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NodeTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -94579,7 +96292,7 @@ func (c *NodeTypesListCall) Header() http.Header { func (c *NodeTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94680,6 +96393,11 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -94818,6 +96536,15 @@ func (c *PacketMirroringsAggregatedListCall) PageToken(pageToken string) *Packet return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -94855,7 +96582,7 @@ func (c *PacketMirroringsAggregatedListCall) Header() http.Header { func (c *PacketMirroringsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -94958,6 +96685,11 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/packetMirrorings", @@ -95061,7 +96793,7 @@ func (c *PacketMirroringsDeleteCall) Header() http.Header { func (c *PacketMirroringsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95229,7 +96961,7 @@ func (c *PacketMirroringsGetCall) Header() http.Header { func (c *PacketMirroringsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95405,7 +97137,7 @@ func (c *PacketMirroringsInsertCall) Header() http.Header { func (c *PacketMirroringsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95599,6 +97331,15 @@ func (c *PacketMirroringsListCall) PageToken(pageToken string) *PacketMirrorings return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *PacketMirroringsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PacketMirroringsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -95636,7 +97377,7 @@ func (c *PacketMirroringsListCall) Header() http.Header { func (c *PacketMirroringsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -95743,6 +97484,11 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/packetMirrorings", @@ -95850,7 +97596,7 @@ func (c *PacketMirroringsPatchCall) Header() http.Header { func (c *PacketMirroringsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96018,7 +97764,7 @@ func (c *PacketMirroringsTestIamPermissionsCall) Header() http.Header { func (c *PacketMirroringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96194,7 +97940,7 @@ func (c *ProjectsDisableXpnHostCall) Header() http.Header { func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96351,7 +98097,7 @@ func (c *ProjectsDisableXpnResourceCall) Header() http.Header { func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96513,7 +98259,7 @@ func (c *ProjectsEnableXpnHostCall) Header() http.Header { func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96671,7 +98417,7 @@ func (c *ProjectsEnableXpnResourceCall) Header() http.Header { func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96826,7 +98572,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -96972,7 +98718,7 @@ func (c *ProjectsGetXpnHostCall) Header() http.Header { func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") 
for k, v := range c.header_ { reqHeaders[k] = v } @@ -97145,6 +98891,15 @@ func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXp return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ProjectsGetXpnResourcesCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -97182,7 +98937,7 @@ func (c *ProjectsGetXpnResourcesCall) Header() http.Header { func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97280,6 +99035,11 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/getXpnResources", @@ -97400,6 +99160,15 @@ func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnH return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ProjectsListXpnHostsCall) ReturnPartialSuccess(returnPartialSuccess bool) *ProjectsListXpnHostsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -97427,7 +99196,7 @@ func (c *ProjectsListXpnHostsCall) Header() http.Header { func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97527,6 +99296,11 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/listXpnHosts", @@ -97630,7 +99404,7 @@ func (c *ProjectsMoveDiskCall) Header() http.Header { func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97795,7 +99569,7 @@ func (c *ProjectsMoveInstanceCall) Header() http.Header { func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -97961,7 +99735,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98128,7 +99902,7 @@ func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98296,7 +100070,7 @@ func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98465,7 +100239,7 @@ func (c *RegionAutoscalersDeleteCall) Header() http.Header { func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98633,7 +100407,7 @@ func (c *RegionAutoscalersGetCall) Header() http.Header { func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -98809,7 +100583,7 @@ func (c *RegionAutoscalersInsertCall) Header() http.Header { func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99003,6 +100777,15 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -99040,7 +100823,7 @@ func (c *RegionAutoscalersListCall) Header() http.Header { func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99147,6 +100930,11 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/autoscalers", @@ -99259,7 +101047,7 @@ func (c *RegionAutoscalersPatchCall) Header() http.Header { func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99448,7 +101236,7 @@ func (c *RegionAutoscalersUpdateCall) Header() http.Header { func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99629,7 +101417,7 @@ func (c *RegionBackendServicesDeleteCall) Header() http.Header { func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99797,7 +101585,7 @@ func (c *RegionBackendServicesGetCall) Header() http.Header { func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -99956,7 +101744,7 @@ func (c *RegionBackendServicesGetHealthCall) Header() http.Header { 
func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100137,7 +101925,7 @@ func (c *RegionBackendServicesInsertCall) Header() http.Header { func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100331,6 +102119,15 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionBackendServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -100368,7 +102165,7 @@ func (c *RegionBackendServicesListCall) Header() http.Header { func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100475,6 +102272,11 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/backendServices", @@ -100583,7 +102385,7 @@ func (c *RegionBackendServicesPatchCall) Header() http.Header { func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100771,7 +102573,7 @@ func (c *RegionBackendServicesUpdateCall) Header() http.Header { func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -100984,6 +102786,15 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -101021,7 +102832,7 @@ func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101124,6 +102935,11 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/commitments", @@ -101220,7 +103036,7 @@ func (c *RegionCommitmentsGetCall) Header() http.Header { func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101396,7 +103212,7 @@ func (c *RegionCommitmentsInsertCall) Header() http.Header { func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101590,6 +103406,15 @@ func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmen return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionCommitmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionCommitmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -101627,7 +103452,7 @@ func (c *RegionCommitmentsListCall) Header() http.Header { func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -101734,6 +103559,11 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/commitments", @@ -101830,7 +103660,7 @@ func (c *RegionDiskTypesGetCall) Header() http.Header { func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102024,6 +103854,15 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *RegionDiskTypesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDiskTypesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -102061,7 +103900,7 @@ func (c *RegionDiskTypesListCall) Header() http.Header { func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102168,6 +104007,11 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/diskTypes", @@ -102275,7 +104119,7 @@ func (c *RegionDisksAddResourcePoliciesCall) Header() http.Header { func (c *RegionDisksAddResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102461,7 +104305,7 @@ func (c *RegionDisksCreateSnapshotCall) Header() http.Header { func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102648,7 +104492,7 @@ func (c *RegionDisksDeleteCall) Header() http.Header { func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102815,7 +104659,7 @@ func (c *RegionDisksGetCall) Header() http.Header { func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102990,7 +104834,7 @@ func (c *RegionDisksGetIamPolicyCall) Header() http.Header { func (c *RegionDisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103179,7 +105023,7 @@ func (c *RegionDisksInsertCall) Header() http.Header { func (c 
*RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103378,6 +105222,15 @@ func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionDisksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionDisksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -103415,7 +105268,7 @@ func (c *RegionDisksListCall) Header() http.Header { func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103522,6 +105375,11 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/disks", @@ -103628,7 +105486,7 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Header() http.Header { func (c *RegionDisksRemoveResourcePoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103814,7 +105672,7 @@ func (c *RegionDisksResizeCall) Header() http.Header { func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103982,7 +105840,7 @@ func (c *RegionDisksSetIamPolicyCall) Header() http.Header { func (c *RegionDisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104163,7 +106021,7 @@ func (c *RegionDisksSetLabelsCall) Header() http.Header { func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104331,7 +106189,7 @@ func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104511,7 +106369,7 @@ func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104678,7 +106536,7 @@ func (c *RegionHealthCheckServicesGetCall) Header() http.Header { func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104853,7 +106711,7 @@ func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105047,6 +106905,15 @@ func (c *RegionHealthCheckServicesListCall) PageToken(pageToken string) *RegionH return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionHealthCheckServicesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthCheckServicesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -105084,7 +106951,7 @@ func (c *RegionHealthCheckServicesListCall) Header() http.Header { func (c *RegionHealthCheckServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105191,6 +107058,11 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/healthCheckServices", @@ -105298,7 +107170,7 @@ func (c *RegionHealthCheckServicesPatchCall) Header() http.Header { func (c *RegionHealthCheckServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105481,7 +107353,7 @@ func (c *RegionHealthChecksDeleteCall) Header() http.Header { func (c *RegionHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105650,7 +107522,7 @@ func (c *RegionHealthChecksGetCall) Header() http.Header { func (c *RegionHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105826,7 +107698,7 @@ func (c *RegionHealthChecksInsertCall) Header() http.Header { func (c *RegionHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range 
c.header_ { reqHeaders[k] = v } @@ -106020,6 +107892,15 @@ func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthCh return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionHealthChecksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionHealthChecksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -106057,7 +107938,7 @@ func (c *RegionHealthChecksListCall) Header() http.Header { func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106164,6 +108045,11 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/healthChecks", @@ -106271,7 +108157,7 @@ func (c *RegionHealthChecksPatchCall) Header() http.Header { func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106458,7 +108344,7 @@ func (c *RegionHealthChecksUpdateCall) Header() http.Header { func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106660,7 +108546,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106826,7 +108712,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.H func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107009,7 +108895,7 @@ func (c 
*RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107192,7 +109078,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107384,7 +109270,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107550,7 +109436,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http. func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107720,7 +109606,7 @@ func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107901,7 +109787,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108094,6 +109980,15 @@ func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -108131,7 +110026,7 @@ func (c *RegionInstanceGroupManagersListCall) Header() http.Header { func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108237,6 +110132,11 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers", @@ -108362,6 +110262,15 @@ func (c *RegionInstanceGroupManagersListErrorsCall) PageToken(pageToken string) return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListErrorsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListErrorsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -108399,7 +110308,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108515,6 +110424,11 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listErrors", @@ -108640,6 +110554,15 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -108667,7 +110590,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Head func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108780,6 +110703,11 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", @@ -108904,6 +110832,15 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageTo return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -108931,7 +110868,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.He func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109044,6 +110981,11 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", @@ -109155,7 +111097,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109342,7 +111284,7 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Header() http.H func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109540,7 +111482,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109735,7 +111677,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109922,7 +111864,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Heade func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110108,7 +112050,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110295,7 +112237,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http. 
func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110469,7 +112411,7 @@ func (c *RegionInstanceGroupsGetCall) Header() http.Header { func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110661,6 +112603,15 @@ func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstan return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -110698,7 +112649,7 @@ func (c *RegionInstanceGroupsListCall) Header() http.Header { func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110804,6 +112755,11 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/instanceGroups", @@ -110932,6 +112888,15 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -110959,7 +112924,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111076,6 +113041,11 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", @@ -111185,7 +113155,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111369,7 +113339,7 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111536,7 +113506,7 @@ func (c *RegionNetworkEndpointGroupsGetCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111710,7 +113680,7 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111903,6 +113873,15 @@ func (c *RegionNetworkEndpointGroupsListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionNetworkEndpointGroupsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -111940,7 +113919,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Header() http.Header { func (c *RegionNetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112046,6 +114025,11 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // "location": "path", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/networkEndpointGroups", @@ -112150,7 +114134,7 @@ func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112319,7 +114303,7 @@ func (c *RegionNotificationEndpointsGetCall) Header() http.Header { func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112495,7 +114479,7 @@ func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112689,6 +114673,15 @@ func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *Regio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionNotificationEndpointsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -112726,7 +114719,7 @@ func (c *RegionNotificationEndpointsListCall) Header() http.Header { func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112833,6 +114826,11 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/notificationEndpoints", @@ -112918,7 +114916,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113054,7 +115052,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113249,6 +115247,15 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -113286,7 +115293,7 @@ func (c *RegionOperationsListCall) Header() http.Header { func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113393,6 +115400,11 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/operations", @@ -113490,7 +115502,7 @@ func (c *RegionOperationsWaitCall) Header() http.Header { func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113662,7 +115674,7 @@ func (c *RegionSslCertificatesDeleteCall) Header() http.Header { func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113832,7 +115844,7 @@ func (c *RegionSslCertificatesGetCall) Header() http.Header { func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114008,7 +116020,7 @@ func (c *RegionSslCertificatesInsertCall) Header() http.Header { func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114202,6 +116214,15 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -114239,7 +116260,7 @@ func (c *RegionSslCertificatesListCall) Header() http.Header { func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114346,6 +116367,11 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/sslCertificates", @@ -114449,7 +116475,7 @@ func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114619,7 +116645,7 @@ func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114795,7 +116821,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114989,6 +117015,15 @@ func (c *RegionTargetHttpProxiesListCall) PageToken(pageToken string) *RegionTar return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionTargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -115026,7 +117061,7 @@ func (c *RegionTargetHttpProxiesListCall) Header() http.Header { func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115133,6 +117168,11 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetHttpProxies", @@ -115238,7 +117278,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115422,7 +117462,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115592,7 +117632,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115768,7 +117808,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115962,6 +118002,15 @@ func (c *RegionTargetHttpsProxiesListCall) PageToken(pageToken string) *RegionTa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionTargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetHttpsProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -115999,7 +118048,7 @@ func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116106,6 +118155,11 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetHttpsProxies", @@ -116211,7 +118265,7 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116397,7 +118451,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116569,7 +118623,7 @@ func (c *RegionUrlMapsDeleteCall) Header() http.Header { func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116738,7 +118792,7 @@ func (c *RegionUrlMapsGetCall) Header() http.Header { func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116902,7 +118956,7 @@ func (c *RegionUrlMapsInsertCall) Header() http.Header { func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117096,6 +119150,15 @@ func (c *RegionUrlMapsListCall) PageToken(pageToken string) *RegionUrlMapsListCa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionUrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionUrlMapsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -117133,7 +119196,7 @@ func (c *RegionUrlMapsListCall) Header() http.Header { func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117240,6 +119303,11 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/urlMaps", @@ -117335,7 +119403,7 @@ func (c *RegionUrlMapsPatchCall) Header() http.Header { func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117510,7 +119578,7 @@ func (c *RegionUrlMapsUpdateCall) Header() http.Header { func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117679,7 +119747,7 @@ func (c *RegionUrlMapsValidateCall) Header() http.Header { func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117850,7 +119918,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118034,6 +120102,15 @@ func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RegionsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -118071,7 +120148,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118169,6 +120246,11 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions", @@ -118301,6 +120383,15 @@ func (c *ReservationsAggregatedListCall) PageToken(pageToken string) *Reservatio return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -118338,7 +120429,7 @@ func (c *ReservationsAggregatedListCall) Header() http.Header { func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118441,6 +120532,11 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/reservations", @@ -118544,7 +120640,7 @@ func (c *ReservationsDeleteCall) Header() http.Header { func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118712,7 +120808,7 @@ func (c *ReservationsGetCall) Header() http.Header { func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118887,7 +120983,7 @@ func (c *ReservationsGetIamPolicyCall) Header() http.Header { func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119069,7 +121165,7 @@ func (c *ReservationsInsertCall) Header() http.Header { func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119263,6 +121359,15 @@ func (c *ReservationsListCall) PageToken(pageToken string) *ReservationsListCall return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ReservationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ReservationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -119300,7 +121405,7 @@ func (c *ReservationsListCall) Header() http.Header { func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119401,6 +121506,11 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -119514,7 +121624,7 @@ func (c *ReservationsResizeCall) Header() http.Header { func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119682,7 +121792,7 @@ func (c *ReservationsSetIamPolicyCall) Header() http.Header { func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119845,7 +121955,7 @@ func (c *ReservationsTestIamPermissionsCall) Header() http.Header { func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120054,6 +122164,15 @@ func (c *ResourcePoliciesAggregatedListCall) PageToken(pageToken string) *Resour return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ResourcePoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -120091,7 +122210,7 @@ func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120194,6 +122313,11 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/resourcePolicies", @@ -120297,7 +122421,7 @@ func (c *ResourcePoliciesDeleteCall) Header() http.Header { func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120465,7 +122589,7 @@ func (c *ResourcePoliciesGetCall) Header() http.Header { func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120640,7 +122764,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120821,7 +122945,7 @@ func (c *ResourcePoliciesInsertCall) Header() http.Header { func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121015,6 +123139,15 @@ func (c *ResourcePoliciesListCall) PageToken(pageToken string) *ResourcePolicies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ResourcePoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ResourcePoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -121052,7 +123185,7 @@ func (c *ResourcePoliciesListCall) Header() http.Header { func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121159,6 +123292,11 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/resourcePolicies", @@ -121246,7 +123384,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121409,7 +123547,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121618,6 +123756,15 @@ func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -121655,7 +123802,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121758,6 +123905,11 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/routers", @@ -121861,7 +124013,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122030,7 +124182,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122226,6 +124378,15 @@ func (c *RoutersGetNatMappingInfoCall) PageToken(pageToken string) *RoutersGetNa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersGetNatMappingInfoCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersGetNatMappingInfoCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -122263,7 +124424,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122373,6 +124534,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "router": { // "description": "Name of the Router resource to query for Nat Mapping information of VM endpoints.", // "location": "path", @@ -122475,7 +124641,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122651,7 +124817,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122845,6 +125011,15 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutersListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -122882,7 +125057,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122989,6 +125164,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/routers", @@ -123096,7 +125276,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123265,7 +125445,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123451,7 +125631,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123634,7 +125814,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123793,7 +125973,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123959,7 +126139,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124143,6 +126323,15 @@ func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *RoutesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RoutesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -124180,7 +126369,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124278,6 +126467,11 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/routes", @@ -124362,7 +126556,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124530,7 +126724,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124688,7 +126882,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124851,7 +127045,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125022,7 +127216,7 @@ func (c *SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125205,6 +127399,15 @@ func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPolicies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. 
+func (c *SecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -125242,7 +127445,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125340,6 +127543,11 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/securityPolicies", @@ -125460,6 +127668,15 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) PageToken(pageToke return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) ReturnPartialSuccess(returnPartialSuccess bool) *SecurityPoliciesListPreconfiguredExpressionSetsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -125497,7 +127714,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Head func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125598,6 +127815,11 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/securityPolicies/listPreconfiguredExpressionSets", @@ -125680,7 +127902,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125843,7 +128065,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126005,7 +128227,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126178,7 +128400,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126337,7 +128559,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126501,7 +128723,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126691,6 +128913,15 @@ func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SnapshotsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SnapshotsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
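For orientation, the many ReturnPartialSuccess setters added throughout this file all follow the same shape: a fluent option that records the returnPartialSuccess query parameter on the list call. A minimal usage sketch against the vendored google.golang.org/api/compute/v1 client follows; the project ID and the reliance on Application Default Credentials are illustrative assumptions, not part of this patch.

	package main

	import (
		"context"
		"fmt"
		"log"

		compute "google.golang.org/api/compute/v1"
	)

	func main() {
		ctx := context.Background()

		// NewService picks up Application Default Credentials when no
		// options are passed (assumed to be available in the environment).
		svc, err := compute.NewService(ctx)
		if err != nil {
			log.Fatal(err)
		}

		// Opt in to partial results: if some backends fail, the server may
		// still return the snapshots it could list. The default stays false.
		call := svc.Snapshots.List("my-project").ReturnPartialSuccess(true)

		// Pages walks every result page until exhaustion or error.
		if err := call.Pages(ctx, func(page *compute.SnapshotList) error {
			for _, s := range page.Items {
				fmt.Println(s.Name)
			}
			return nil
		}); err != nil {
			log.Fatal(err)
		}
	}
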
@@ -126728,7 +128959,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126826,6 +129057,11 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/snapshots", @@ -126911,7 +129147,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127063,7 +129299,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127215,7 +129451,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127416,6 +129652,15 @@ func (c *SslCertificatesAggregatedListCall) PageToken(pageToken string) *SslCert return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslCertificatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -127453,7 +129698,7 @@ func (c *SslCertificatesAggregatedListCall) Header() http.Header { func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127556,6 +129801,11 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/sslCertificates", @@ -127657,7 +129907,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127815,7 +130065,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127980,7 +130230,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128163,6 +130413,15 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslCertificatesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -128200,7 +130459,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128298,6 +130557,11 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/sslCertificates", @@ -128401,7 +130665,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128558,7 +130822,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128722,7 +130986,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128905,6 +131169,15 @@ func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -128942,7 +131215,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129040,6 +131313,11 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/sslPolicies", @@ -129160,6 +131438,15 @@ func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslP return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -129197,7 +131484,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129297,6 +131584,11 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/sslPolicies/listAvailableFeatures", @@ -129380,7 +131672,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129583,6 +131875,15 @@ func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *Subnetworks return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *SubnetworksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -129620,7 +131921,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129723,6 +132024,11 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/subnetworks", @@ -129826,7 +132132,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130005,7 +132311,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130182,7 +132488,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130357,7 +132663,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130539,7 +132845,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for 
k, v := range c.header_ { reqHeaders[k] = v } @@ -130733,6 +133039,15 @@ func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -130770,7 +133085,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130877,6 +133192,11 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/subnetworks", @@ -130997,6 +133317,15 @@ func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *SubnetworksListUsableCall) ReturnPartialSuccess(returnPartialSuccess bool) *SubnetworksListUsableCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -131034,7 +133363,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131132,6 +133461,11 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/subnetworks/listUsable", @@ -131255,7 +133589,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131429,7 +133763,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131612,7 +133946,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131780,7 +134114,7 @@ func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131958,7 +134292,7 @@ func (c *TargetGrpcProxiesDeleteCall) Header() http.Header { func (c *TargetGrpcProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132116,7 +134450,7 @@ func (c *TargetGrpcProxiesGetCall) Header() http.Header { func (c *TargetGrpcProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132281,7 +134615,7 @@ func (c *TargetGrpcProxiesInsertCall) Header() http.Header { func (c *TargetGrpcProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132463,6 +134797,15 @@ func (c *TargetGrpcProxiesListCall) PageToken(pageToken string) *TargetGrpcProxi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *TargetGrpcProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetGrpcProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -132500,7 +134843,7 @@ func (c *TargetGrpcProxiesListCall) Header() http.Header { func (c *TargetGrpcProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132598,6 +134941,11 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetGrpcProxies", @@ -132703,7 +135051,7 @@ func (c *TargetGrpcProxiesPatchCall) Header() http.Header { func (c *TargetGrpcProxiesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132908,6 +135256,15 @@ func (c *TargetHttpProxiesAggregatedListCall) PageToken(pageToken string) *Targe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -132945,7 +135302,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133048,6 +135405,11 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetHttpProxies", @@ -133150,7 +135512,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133309,7 +135671,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133475,7 +135837,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133659,6 +136021,15 @@ func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -133696,7 +136067,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133794,6 +136165,11 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetHttpProxies", @@ -133830,6 +136206,184 @@ func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHtt } } +// method id "compute.targetHttpProxies.patch": + +type TargetHttpProxiesPatchCall struct { + s *Service + project string + targetHttpProxy string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified TargetHttpProxy resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. (== +// suppress_warning http-rest-shadowed ==) +func (r *TargetHttpProxiesService) Patch(project string, targetHttpProxy string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesPatchCall { + c := &TargetHttpProxiesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + c.targethttpproxy = targethttpproxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpProxiesPatchCall) RequestId(requestId string) *TargetHttpProxiesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesPatchCall) Fields(s ...googleapi.Field) *TargetHttpProxiesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesPatchCall) Context(ctx context.Context) *TargetHttpProxiesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpProxiesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpProxiesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpProxies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified TargetHttpProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules. (== suppress_warning http-rest-shadowed ==)", + // "httpMethod": "PATCH", + // "id": "compute.targetHttpProxies.patch", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "request": { + // "$ref": "TargetHttpProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpProxies.setUrlMap": type TargetHttpProxiesSetUrlMapCall struct { @@ -133898,7 +136452,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134103,6 +136657,15 @@ func (c *TargetHttpsProxiesAggregatedListCall) PageToken(pageToken string) *Targ return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpsProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -134140,7 +136703,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134243,6 +136806,11 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetHttpsProxies", @@ -134344,7 +136912,7 @@ func (c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134502,7 +137070,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134667,7 +137235,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134850,6 +137418,15 @@ func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsPro return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetHttpsProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetHttpsProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -134887,7 +137464,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134985,6 +137562,11 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetHttpsProxies", @@ -135088,7 +137670,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135262,7 +137844,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135441,7 +138023,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135615,7 +138197,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135820,6 +138402,15 @@ func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetI return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetInstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
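The compute.targetHttpProxies.patch method added earlier in this file uses JSON merge-patch semantics: only fields set on the request body change, everything else keeps its current value. A hedged sketch of a caller, assuming the compute import and a constructed *compute.Service as in the earlier snippet; all resource names are placeholders.

	// patchURLMap swaps only the URL map of an existing global target HTTP
	// proxy via the newly added Patch call; untouched fields are preserved.
	func patchURLMap(ctx context.Context, svc *compute.Service, project, proxy, urlMapSelfLink string) (*compute.Operation, error) {
		return svc.TargetHttpProxies.Patch(project, proxy, &compute.TargetHttpProxy{
			UrlMap: urlMapSelfLink,
		}).Context(ctx).Do()
	}
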
@@ -135857,7 +138448,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135960,6 +138551,11 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetInstances", @@ -136064,7 +138660,7 @@ func (c *TargetInstancesDeleteCall) Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136234,7 +138830,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136411,7 +139007,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136606,6 +139202,15 @@ func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesLi return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetInstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -136643,7 +139248,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136744,6 +139349,11 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone scoping this request.", // "location": "path", @@ -136856,7 +139466,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137043,7 +139653,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137257,6 +139867,15 @@ func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPools return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetPoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -137294,7 +139913,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137397,6 +140016,11 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetPools", @@ -137501,7 +140125,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137671,7 +140295,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137831,7 +140455,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138013,7 +140637,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138208,6 +140832,15 @@ func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetPoolsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetPoolsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -138245,7 +140878,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138352,6 +140985,11 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetPools", @@ -138458,7 +141096,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138645,7 +141283,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138839,7 +141477,7 @@ func (c *TargetPoolsSetBackupCall) Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139027,7 +141665,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139185,7 +141823,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139350,7 +141988,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139533,6 +142171,15 @@ func (c *TargetSslProxiesListCall) PageToken(pageToken string) *TargetSslProxies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetSslProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetSslProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -139570,7 +142217,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139668,6 +142315,11 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetSslProxies", @@ -139771,7 +142423,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139946,7 +142598,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140121,7 +142773,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140299,7 +142951,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140471,7 +143123,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c *TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140629,7 +143281,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140794,7 +143446,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140977,6 +143629,15 @@ func (c *TargetTcpProxiesListCall) PageToken(pageToken string) *TargetTcpProxies return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -141014,7 +143675,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141112,6 +143773,11 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/targetTcpProxies", @@ -141215,7 +143881,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141390,7 +144056,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141594,6 +144260,15 @@ func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *Targe return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. 
The default value is +// false and the logic is the same as today. +func (c *TargetVpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -141631,7 +144306,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -141734,6 +144409,11 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/targetVpnGateways", @@ -141837,7 +144517,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142006,7 +144686,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142182,7 +144862,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142376,6 +145056,15 @@ func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewa return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *TargetVpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetVpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -142413,7 +145102,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142520,6 +145209,11 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/targetVpnGateways", @@ -142653,6 +145347,15 @@ func (c *UrlMapsAggregatedListCall) PageToken(pageToken string) *UrlMapsAggregat return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *UrlMapsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -142690,7 +145393,7 @@ func (c *UrlMapsAggregatedListCall) Header() http.Header { func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -142793,6 +145496,11 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/urlMaps", @@ -142895,7 +145603,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143054,7 +145762,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143220,7 +145928,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143333,6 +146041,9 @@ type UrlMapsInvalidateCacheCall struct { // InvalidateCache: Initiates a cache invalidation operation, // invalidating the specified path, scoped to the specified UrlMap. +// +// For more information, see [Invalidating cached +// content](/cdn/docs/invalidating-cached-content). func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143387,7 +146098,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143452,7 +146163,7 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.\n\nFor more information, see [Invalidating cached content](/cdn/docs/invalidating-cached-content).", // "httpMethod": "POST", // "id": "compute.urlMaps.invalidateCache", // "parameterOrder": [ @@ -143580,6 +146291,15 @@ func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *UrlMapsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *UrlMapsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -143617,7 +146337,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143715,6 +146435,11 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/urlMaps", @@ -143821,7 +146546,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -143998,7 +146723,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144157,7 +146882,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144356,6 +147081,15 @@ func (c *VpnGatewaysAggregatedListCall) PageToken(pageToken string) *VpnGateways return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -144393,7 +147127,7 @@ func (c *VpnGatewaysAggregatedListCall) Header() http.Header { func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144496,6 +147230,11 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/vpnGateways", @@ -144599,7 +147338,7 @@ func (c *VpnGatewaysDeleteCall) Header() http.Header { func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144768,7 +147507,7 @@ func (c *VpnGatewaysGetCall) Header() http.Header { func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -144935,7 +147674,7 @@ func (c *VpnGatewaysGetStatusCall) Header() http.Header { func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145111,7 +147850,7 @@ func (c *VpnGatewaysInsertCall) Header() http.Header { func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145305,6 +148044,15 @@ func (c *VpnGatewaysListCall) PageToken(pageToken string) *VpnGatewaysListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnGatewaysListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnGatewaysListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -145342,7 +148090,7 @@ func (c *VpnGatewaysListCall) Header() http.Header { func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145449,6 +148197,11 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/vpnGateways", @@ -145555,7 +148308,7 @@ func (c *VpnGatewaysSetLabelsCall) Header() http.Header { func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145723,7 +148476,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -145932,6 +148685,15 @@ func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAg return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnTunnelsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -145969,7 +148731,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146072,6 +148834,11 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/aggregated/vpnTunnels", @@ -146175,7 +148942,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146344,7 +149111,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146520,7 +149287,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146714,6 +149481,15 @@ func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *VpnTunnelsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *VpnTunnelsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -146751,7 +149527,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -146858,6 +149634,11 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/regions/{region}/vpnTunnels", @@ -146943,7 +149724,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147079,7 +149860,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147274,6 +150055,15 @@ func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsList return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ZoneOperationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZoneOperationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -147311,7 +150101,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147412,6 +150202,11 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // "required": true, // "type": "string" // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" + // }, // "zone": { // "description": "Name of the zone for request.", // "location": "path", @@ -147515,7 +150310,7 @@ func (c *ZoneOperationsWaitCall) Header() http.Header { func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147679,7 +150474,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147863,6 +150658,15 @@ func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { return c } +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false and the logic is the same as today. +func (c *ZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *ZonesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -147900,7 +150704,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -147998,6 +150802,11 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false and the logic is the same as today.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/zones", diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json deleted file mode 100644 index 6b36a92961ead..0000000000000 --- a/vendor/google.golang.org/api/internal/service-account.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "service_account", - "project_id": "project_id", - "private_key_id": "private_key_id", - "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n", - "client_email": "xyz@developer.gserviceaccount.com", - "client_id": "123", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 26259b82abb6d..7c0f9292d504e 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -41,6 +41,7 @@ type DialSettings struct { CustomClaims map[string]interface{} SkipValidation bool ImpersonationConfig *impersonate.Config + EnableDirectPath bool // Google API system parameters. 
For more information please read: // https://cloud.google.com/apis/docs/system-parameters diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index b4d78a830ae32..6ee4501c048cf 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -50,3 +50,18 @@ type skipDialSettingsValidation struct{} func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { settings.SkipValidation = true } + +// EnableDirectPath returns a ClientOption that overrides the default +// attempt to use DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPath(dp bool) option.ClientOption { + return enableDirectPath(dp) +} + +type enableDirectPath bool + +func (e enableDirectPath) Apply(o *internal.DialSettings) { + o.EnableDirectPath = bool(e) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index fccf2cf7c2a87..1e076ab66d440 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"-2NioU2H8y8siEzrBOV_qzRI6kQ/sPMqcoPqdFD0a_Hhegm2T9RGrZc\"", + "etag": "\"3133373531323239383338313531333236393038\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1781,7 +1781,7 @@ "type": "string" }, "kmsKeyName": { - "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", "location": "query", "type": "string" }, @@ -3229,7 +3229,7 @@ } } }, - "revision": "20200826", + "revision": "20200927", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { @@ -3579,10 +3579,6 @@ "type": "string" }, "type": "array" - }, - "zoneSeparation": { - "description": "If set, objects placed in this bucket are required to be separated by disaster domain.", - "type": "boolean" } }, "type": "object" @@ -4096,7 +4092,7 @@ "type": "string" }, "kmsKeyName": { - "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key.", + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", "type": "string" }, "md5Hash": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 8b51ef4bd5140..9e2483bbd5905 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -400,10 +400,6 @@ type Bucket struct { // requests will fail with a 400 Bad Request response. 
ZoneAffinity []string `json:"zoneAffinity,omitempty"` - // ZoneSeparation: If set, objects placed in this bucket are required to - // be separated by disaster domain. - ZoneSeparation bool `json:"zoneSeparation,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1736,8 +1732,8 @@ type Object struct { // storage#object. Kind string `json:"kind,omitempty"` - // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object - // is encrypted by such a key. + // KmsKeyName: Not currently supported. Specifying the parameter causes + // the request to fail with status code 400 - Bad Request. KmsKeyName string `json:"kmsKeyName,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. For more @@ -2444,7 +2440,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2592,7 +2588,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2759,7 +2755,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2932,7 +2928,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3093,7 +3089,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3267,7 +3263,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3453,7 +3449,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3632,7 +3628,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3838,7 +3834,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4055,7 +4051,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4312,7 +4308,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4522,7 +4518,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4757,7 +4753,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4986,7 +4982,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5161,7 +5157,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { 
reqHeaders[k] = v } @@ -5401,7 +5397,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5613,7 +5609,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5730,7 +5726,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5878,7 +5874,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6046,7 +6042,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6236,7 +6232,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6409,7 +6405,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6583,7 +6579,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6755,7 +6751,7 @@ func (c 
*NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6903,7 +6899,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7073,7 +7069,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7248,7 +7244,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7421,7 +7417,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7593,7 +7589,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7784,7 +7780,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7981,7 +7977,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8166,7 +8162,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8364,7 +8360,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8549,9 +8545,11 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Not currently -// supported. Specifying the parameter causes the request to fail with -// status code 400 - Bad Request. +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c @@ -8599,7 +8597,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8718,7 +8716,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "kmsKeyName": { - // "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", // "location": "query", // "type": "string" // }, @@ -8942,7 +8940,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9270,7 +9268,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9503,7 +9501,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9753,7 +9751,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10070,7 +10068,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10443,7 +10441,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10760,7 +10758,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11152,7 +11150,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11455,7 +11453,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11655,7 +11653,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11916,7 +11914,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12234,7 +12232,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12450,7 +12448,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12600,7 +12598,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12735,7 +12733,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12935,7 +12933,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13130,7 +13128,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13307,7 +13305,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200912") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 77ee798e0f7d0..f8a6ca299817c 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -9,15 +9,16 @@ package grpc import ( "context" + "crypto/tls" "errors" "log" - "os" "strings" "go.opencensus.io/plugin/ocgrpc" "golang.org/x/oauth2" "google.golang.org/api/internal" "google.golang.org/api/option" + "google.golang.org/api/transport/internal/dca" "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" @@ -112,6 +113,10 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if o.GRPCConn != nil { return o.GRPCConn, nil } + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(o) + if err != nil { + return nil, err + } var grpcOpts []grpc.DialOption if insecure { grpcOpts = []grpc.DialOption{grpc.WithInsecure()} @@ -132,11 +137,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // * The endpoint is a host:port (or dns:///host:port). // * Credentials are obtained via GCE metadata server, using the default // service account. - // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. - // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub - if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { - if !strings.HasPrefix(o.Endpoint, "dns:///") { - o.Endpoint = "dns:///" + o.Endpoint + if o.EnableDirectPath && checkDirectPathEndPoint(endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint } grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle( @@ -150,13 +153,16 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. } else { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } grpcOpts = []grpc.DialOption{ grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{creds.TokenSource}, quotaProject: o.QuotaProject, requestReason: o.RequestReason, }), - grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), } } } @@ -180,34 +186,11 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // point when isDirectPathEnabled will default to true, we guard it by // the Directpath env var for now once we can introspect user defined // dialer (https://github.com/grpc/grpc-go/issues/2795). 
- if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) { + if timeoutDialerOption != nil && o.EnableDirectPath && checkDirectPathEndPoint(endpoint) { grpcOpts = append(grpcOpts, timeoutDialerOption) } - return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) -} - -// generateDefaultMtlsEndpoint attempts to derive the mTLS version of the -// defaultEndpoint via regex, and returns defaultEndpoint if unsuccessful. -// -// We need to applying the following 2 transformations: -// 1. pubsub.googleapis.com to pubsub.mtls.googleapis.com -// 2. pubsub.sandbox.googleapis.com to pubsub.mtls.sandbox.googleapis.com -// -// TODO(cbro): In the future, the mTLS endpoint will be read from Service Config -// and passed in as defaultMtlsEndpoint instead of generated from defaultEndpoint, -// and this function will be removed. -func generateDefaultMtlsEndpoint(defaultEndpoint string) string { - var domains = []string{ - ".sandbox.googleapis.com", // must come first because .googleapis.com is a substring - ".googleapis.com", - } - for _, domain := range domains { - if strings.Contains(defaultEndpoint, domain) { - return strings.Replace(defaultEndpoint, domain, ".mtls"+domain, -1) - } - } - return defaultEndpoint + return grpc.DialContext(ctx, endpoint, grpcOpts...) } func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { @@ -264,8 +247,8 @@ func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool { return true } -func isDirectPathEnabled(endpoint string) bool { - // Only host:port is supported, not other schemes (e.g., "tcp://" or "unix://"). +func checkDirectPathEndPoint(endpoint string) bool { + // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). // Also don't try direct path if the user has chosen an alternate name resolver // (i.e., via ":///" prefix). // @@ -275,15 +258,11 @@ func isDirectPathEnabled(endpoint string) bool { return false } - // Only try direct path if the user has opted in via the environment variable. - whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") - for _, api := range whitelist { - // Ignore empty string since an empty env variable splits into [""] - if api != "" && strings.Contains(endpoint, api) { - return true - } + if endpoint == "" { + return false } - return false + + return true } func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { @@ -295,12 +274,6 @@ func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, return nil, err } - // NOTE(cbro): this is used only by the nightly mtls_smoketest and should - // not otherwise be used. It will be removed or renamed at some point. 
- if os.Getenv("GOOGLE_API_USE_MTLS") == "always" { - o.Endpoint = generateDefaultMtlsEndpoint(o.Endpoint) - } - return &o, nil } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 44503014172c9..8578cac9ef21b 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -13,9 +13,6 @@ import ( "errors" "net" "net/http" - "net/url" - "os" - "strings" "time" "go.opencensus.io/plugin/ochttp" @@ -25,12 +22,7 @@ import ( "google.golang.org/api/option" "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" -) - -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" + "google.golang.org/api/transport/internal/dca" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -41,11 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) if err != nil { return nil, "", err } @@ -218,87 +206,3 @@ func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) ht Propagation: &propagation.HTTPFormat{}, } } - -// getClientCertificateSource returns a default client certificate source, if -// not provided by the user. -// -// A nil default source can be returned if the source does not exist. Any exceptions -// encountered while initializing the default source will be reported as client -// error (ex. corrupt metadata file). -// -// The overall logic is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. -// -// Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. -// -// We would like to avoid introducing client-side logic that parses whether the -// endpoint override is an mTLS url, since the url pattern may change at anytime. -func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { - if settings.HTTPClient != nil { - return nil, nil // HTTPClient is incompatible with ClientCertificateSource - } else if settings.ClientCertSource != nil { - return settings.ClientCertSource, nil - } else { - return cert.DefaultSource() - } - -} - -// getEndpoint returns the endpoint for the service, taking into account the -// user-provided endpoint override "settings.Endpoint" -// -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. -// -// You can override the default endpoint (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS environment variable. 
-// -// If the endpoint override is an address (host:port) rather than full base -// URL (ex. https://...), then the user-provided address will be merged into -// the default endpoint. For example, WithEndpoint("myhost:8000") and -// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { - if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - return settings.DefaultMTLSEndpoint, nil - } - return settings.DefaultEndpoint, nil - } - if strings.Contains(settings.Endpoint, "://") { - // User passed in a full URL path, use it verbatim. - return settings.Endpoint, nil - } - if settings.DefaultEndpoint == "" { - return "", errors.New("WithEndpoint requires a full URL path") - } - - // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) -} - -func getMTLSMode() string { - mode := os.Getenv("GOOGLE_API_USE_MTLS") - if mode == "" { - // TODO(shinfan): Update this to "auto" when the mTLS feature is fully released. - return mTLSModeNever - } - return strings.ToLower(mode) -} - -func mergeEndpoints(base, newHost string) (string, error) { - u, err := url.Parse(base) - if err != nil { - return "", err - } - u.Host = newHost - return u.String(), nil -} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go new file mode 100644 index 0000000000000..b3be7e4e3a7ff --- /dev/null +++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go @@ -0,0 +1,145 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dca contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. 
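End users do not call the dca helpers that follow directly; they steer them through the google.golang.org/api/option package named in the package comment above, plus the GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT environment variables defined further down. A hedged sketch of passing an endpoint override, assuming the generated storage/v1 client vendored elsewhere in this tree and an illustrative endpoint URL:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// With no WithEndpoint override, getEndpoint (below) picks the default or
	// default-mTLS endpoint based on the environment variables; an explicit
	// override is used as-is, or merged with the default endpoint when it is
	// only host[:port].
	svc, err := storage.NewService(ctx, option.WithEndpoint("https://storage.googleapis.com/storage/v1/"))
	if err != nil {
		log.Fatalf("creating storage client: %v", err)
	}
	_ = svc
}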
+package dca + +import ( + "net/url" + "os" + "strings" + + "google.golang.org/api/internal" + "google.golang.org/api/transport/cert" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" +) + +// GetClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.HTTPClient != nil { + return nil, nil // HTTPClient is incompatible with ClientCertificateSource + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. + return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return settings.DefaultMTLSEndpoint, nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. 
+ return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 1f4a292f51641..9f822faee171f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -58,7 +58,7 @@ type ScaleStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale // Scale represents a scaling request for a resource. @@ -81,7 +81,7 @@ type Scale struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.5 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSet // DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for @@ -284,7 +284,7 @@ type StatefulSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.5 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSetList // StatefulSetList is a collection of StatefulSets. @@ -299,7 +299,7 @@ type StatefulSetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment // DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for @@ -373,7 +373,7 @@ type DeploymentSpec struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentRollback // DEPRECATED. 
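The host-merging behaviour of mergeEndpoints/fixScheme in the new dca.go above is easiest to see with a concrete input; a small self-contained copy of that logic (illustrative only, not the vendored functions) and its output:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// mergeEndpoints and fixScheme reproduce the logic shown above: the
// user-supplied host[:port] replaces the host portion of the default
// endpoint while the scheme and path are kept.
func mergeEndpoints(baseURL, newHost string) (string, error) {
	u, err := url.Parse(fixScheme(baseURL))
	if err != nil {
		return "", err
	}
	return strings.Replace(baseURL, u.Host, newHost, 1), nil
}

func fixScheme(baseURL string) string {
	if !strings.Contains(baseURL, "://") {
		return "https://" + baseURL
	}
	return baseURL
}

func main() {
	merged, err := mergeEndpoints("https://foo.com/bar/baz", "myhost:8000")
	if err != nil {
		panic(err)
	}
	fmt.Println(merged) // https://myhost:8000/bar/baz
}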
@@ -534,7 +534,7 @@ type DeploymentCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.6 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList // DeploymentList is a list of Deployments. @@ -552,7 +552,7 @@ type DeploymentList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.7 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevision // DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the @@ -583,7 +583,7 @@ type ControllerRevision struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.7 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevisionList // ControllerRevisionList is a resource containing a list of ControllerRevision objects. diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go index 80a5e2f2bd54e..f3850fc90cf2a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go @@ -45,7 +45,7 @@ func (in *ControllerRevision) APILifecycleReplacement() schema.GroupVersionKind // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevision) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -69,7 +69,7 @@ func (in *ControllerRevisionList) APILifecycleReplacement() schema.GroupVersionK // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevisionList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -93,7 +93,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *Deployment) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -117,7 +117,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -141,7 +141,7 @@ func (in *DeploymentRollback) APILifecycleReplacement() schema.GroupVersionKind // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentRollback) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -165,7 +165,7 @@ func (in *Scale) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Scale) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -189,7 +189,7 @@ func (in *StatefulSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -213,5 +213,5 @@ func (in *StatefulSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *StatefulSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 3d294697c13d5..fc542ac1c872f 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -60,7 +60,7 @@ type ScaleStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale // Scale represents a scaling request for a resource. @@ -85,7 +85,7 @@ type Scale struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSet // DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for @@ -292,7 +292,7 @@ type StatefulSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSetList // StatefulSetList is a collection of StatefulSets. @@ -307,7 +307,7 @@ type StatefulSetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment // DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for @@ -510,7 +510,7 @@ type DeploymentCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList // DeploymentList is a list of Deployments. @@ -681,7 +681,7 @@ type DaemonSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSet // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for @@ -718,7 +718,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSetList // DaemonSetList is a collection of daemon sets. 
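The effect of moving the removed tag from 1.18 back to 1.16 is visible through the generated lifecycle accessors above; a minimal sketch querying them for the apps/v1beta1 Deployment type (assuming k8s.io/api from this vendor tree):

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
)

func main() {
	var d appsv1beta1.Deployment

	// Generated from the +k8s:prerelease-lifecycle-gen tags shown above:
	// after this change the type reports removal in 1.16 instead of 1.18.
	depMajor, depMinor := d.APILifecycleDeprecated()
	remMajor, remMinor := d.APILifecycleRemoved()

	fmt.Printf("apps/v1beta1 Deployment: deprecated since %d.%d, not served as of %d.%d\n",
		depMajor, depMinor, remMajor, remMinor)
}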
@@ -737,7 +737,7 @@ type DaemonSetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSet // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for @@ -769,7 +769,7 @@ type ReplicaSet struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSetList // ReplicaSetList is a collection of ReplicaSets. @@ -873,7 +873,7 @@ type ReplicaSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevision // DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the @@ -904,7 +904,7 @@ type ControllerRevision struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.8 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevisionList // ControllerRevisionList is a resource containing a list of ControllerRevision objects. diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go index 3a63b8f12876f..3368a18964de7 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go @@ -45,7 +45,7 @@ func (in *ControllerRevision) APILifecycleReplacement() schema.GroupVersionKind // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ControllerRevision) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -69,7 +69,7 @@ func (in *ControllerRevisionList) APILifecycleReplacement() schema.GroupVersionK // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *ControllerRevisionList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -93,7 +93,7 @@ func (in *DaemonSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -117,7 +117,7 @@ func (in *DaemonSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -141,7 +141,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Deployment) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -165,7 +165,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -189,7 +189,7 @@ func (in *ReplicaSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *ReplicaSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -213,7 +213,7 @@ func (in *ReplicaSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ReplicaSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -237,7 +237,7 @@ func (in *Scale) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Scale) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -261,7 +261,7 @@ func (in *StatefulSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -285,5 +285,5 @@ func (in *StatefulSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *StatefulSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index a1ef1a10badd7..bd75c51bcca7d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -52,7 +52,7 @@ type ScaleStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.2 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // represents a scaling request for a resource. 
type Scale struct { @@ -76,7 +76,7 @@ type Scale struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment // DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for @@ -153,7 +153,7 @@ type DeploymentSpec struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // DEPRECATED. // DeploymentRollback stores the information required to rollback a deployment. @@ -313,7 +313,7 @@ type DeploymentCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList // DeploymentList is a list of Deployments. @@ -491,7 +491,7 @@ type DaemonSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSet // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for @@ -534,7 +534,7 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.1 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSetList // DaemonSetList is a collection of daemon sets. @@ -808,7 +808,7 @@ type IngressBackend struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSet // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -840,7 +840,7 @@ type ReplicaSet struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.8 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSetList // ReplicaSetList is a collection of ReplicaSets. 
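Alongside the removal release, the replacement tags above surface as APILifecycleReplacement(); a short sketch showing where callers of the deprecated extensions/v1beta1 Deployment are pointed (again assuming k8s.io/api from this vendor tree):

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	var d extensionsv1beta1.Deployment

	// The +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment tag
	// generates this accessor, which names the GroupVersionKind to migrate to.
	gvk := d.APILifecycleReplacement()
	fmt.Printf("extensions/v1beta1 Deployment is replaced by %s/%s %s\n",
		gvk.Group, gvk.Version, gvk.Kind)
}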
@@ -946,7 +946,7 @@ type ReplicaSetCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy // PodSecurityPolicy governs the ability to make requests that affect the Security Context @@ -1308,7 +1308,7 @@ const AllowAllRuntimeClassNames = "*" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.2 // +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1328,7 +1328,7 @@ type PodSecurityPolicyList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=networking.k8s.io,v1,NetworkPolicy // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1502,7 +1502,7 @@ type NetworkPolicyPeer struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 // +k8s:prerelease-lifecycle-gen:deprecated=1.9 -// +k8s:prerelease-lifecycle-gen:removed=1.18 +// +k8s:prerelease-lifecycle-gen:removed=1.16 // +k8s:prerelease-lifecycle-gen:replacement=networking.k8s.io,v1,NetworkPolicyList // DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 8630905bf2084..5023dd31a5dfd 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -45,7 +45,7 @@ func (in *DaemonSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DaemonSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -69,7 +69,7 @@ func (in *DaemonSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *DaemonSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -93,7 +93,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *Deployment) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -117,7 +117,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -135,7 +135,7 @@ func (in *DeploymentRollback) APILifecycleDeprecated() (major, minor int) { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *DeploymentRollback) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -207,7 +207,7 @@ func (in *NetworkPolicy) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *NetworkPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -231,7 +231,7 @@ func (in *NetworkPolicyList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -255,7 +255,7 @@ func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -279,7 +279,7 @@ func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKi // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -303,7 +303,7 @@ func (in *ReplicaSet) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ReplicaSet) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -327,7 +327,7 @@ func (in *ReplicaSetList) APILifecycleReplacement() schema.GroupVersionKind { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ReplicaSetList) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -345,5 +345,5 @@ func (in *Scale) APILifecycleDeprecated() (major, minor int) { // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *Scale) APILifecycleRemoved() (major, minor int) { - return 1, 18 + return 1, 16 } diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 2f9f6f0d82bcd..64d29622e8972 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -27,7 +27,7 @@ Historical context is available here: How to use klog =============== -- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"` - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) @@ -35,6 +35,10 @@ How to use klog **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. +### Coexisting with klog/v2 + +See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2. + ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md new file mode 100644 index 0000000000000..2083d44cdf90a --- /dev/null +++ b/vendor/k8s.io/klog/v2/SECURITY.md @@ -0,0 +1,22 @@ +# Security Policy + +## Security Announcements + +Join the [kubernetes-security-announce] group for security and vulnerability announcements. + +You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. + +## Reporting a Vulnerability + +Instructions for reporting a vulnerability can be found on the +[Kubernetes Security and Disclosure Information] page. + +## Supported Versions + +Information about supported Kubernetes versions can be found on the +[Kubernetes version and version skew support policy] page on the Kubernetes website. 
+ +[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce +[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 +[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions +[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 49f1f2dd2d87e..23cced6250761 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -413,6 +413,7 @@ func init() { logging.skipHeaders = false logging.addDirHeader = false logging.skipLogHeaders = false + logging.oneOutput = false go logging.flushDaemon() } @@ -432,6 +433,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -505,6 +507,12 @@ type loggingT struct { // If set, all output will be redirected unconditionally to the provided logr.Logger logr logr.Logger + + // If true, messages will not be propagated to lower severity log levels + oneOutput bool + + // If set, all output will be filtered through the filter. + filter LogFilter } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -687,7 +695,7 @@ func (buf *buffer) someDigits(i, d int) int { return copy(buf.tmp[i:], buf.tmp[j:]) } -func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { +func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -695,15 +703,18 @@ func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) { l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprintln(buf, args...) l.output(s, logr, buf, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, args ...interface{}) { - l.printDepth(s, logr, 1, args...) +func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logr, filter, 1, args...) 
} -func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -711,6 +722,9 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...i l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -718,7 +732,7 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...i l.output(s, logr, buf, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...interface{}) { +func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { buf, file, line := l.header(s, 0) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -726,6 +740,9 @@ func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...i l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + format, args = filter.FilterF(format, args) + } fmt.Fprintf(buf, format, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -736,7 +753,7 @@ func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...i // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers @@ -744,6 +761,9 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, l.putBuffer(buf) buf = l.getBuffer() } + if filter != nil { + args = filter.Filter(args) + } fmt.Fprint(buf, args...) if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') @@ -752,18 +772,24 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } if loggr != nil { - loggr.Error(err, msg, keysAndValues) + loggr.Error(err, msg, keysAndValues...) return } l.printS(err, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. 
-func (l *loggingT) infoS(loggr logr.Logger, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { + if filter != nil { + msg, keysAndValues = filter.FilterS(msg, keysAndValues) + } if loggr != nil { - loggr.Info(msg, keysAndValues) + loggr.Info(msg, keysAndValues...) return } l.printS(nil, msg, keysAndValues...) @@ -785,7 +811,7 @@ func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { } else { s = errorLog } - l.printDepth(s, logging.logr, 2, b) + l.printDepth(s, logging.logr, nil, 2, b) } const missingValue = "(MISSING)" @@ -919,18 +945,22 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, } } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) + if l.oneOutput { + l.file[s].Write(data) + } else { + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } } @@ -1077,11 +1107,19 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { } var err error sb.file, _, err = create(severityName[sb.sev], now, startup) - sb.nbytes = 0 if err != nil { return err } - + if startup { + fileInfo, err := sb.file.Stat() + if err != nil { + return fmt.Errorf("file stat could not get fileinfo: %v", err) + } + // init file size + sb.nbytes = uint64(fileInfo.Size()) + } else { + sb.nbytes = 0 + } sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) if sb.logger.skipLogHeaders { @@ -1197,7 +1235,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, file, line, true, text) + logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) return len(b), nil } @@ -1232,13 +1270,14 @@ func (l *loggingT) setV(pc uintptr) Level { type Verbose struct { enabled bool logr logr.Logger + filter LogFilter } func newVerbose(level Level, b bool) Verbose { if logging.logr == nil { - return Verbose{b, nil} + return Verbose{b, nil, logging.filter} } - return Verbose{b, logging.logr.V(int(level))} + return Verbose{b, logging.logr.V(int(level)), logging.filter} } // V reports whether verbosity at the call site is at least the requested level. @@ -1265,7 +1304,7 @@ func V(level Level) Verbose { return newVerbose(level, true) } - // It's off globally but it vmodule may still be set. + // It's off globally but vmodule may still be set. // Here is another cheap but safe test to see if vmodule is enabled. if atomic.LoadInt32(&logging.filterLength) > 0 { // Now we need a proper lock to use the logging structure. The pcs field @@ -1296,7 +1335,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, args...) + logging.print(infoLog, v.logr, v.filter, args...) } } @@ -1304,7 +1343,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. 
func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, args...) + logging.println(infoLog, v.logr, v.filter, args...) } } @@ -1312,7 +1351,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, format, args...) + logging.printf(infoLog, v.logr, v.filter, format, args...) } } @@ -1320,14 +1359,14 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, msg, keysAndValues...) + logging.infoS(v.logr, v.filter, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, msg, args...) + logging.errorS(err, v.logr, v.filter, msg, args...) } } @@ -1335,32 +1374,32 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, msg, keysAndValues...) + logging.errorS(err, v.logr, v.filter, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, args...) + logging.print(infoLog, logging.logr, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, depth, args...) + logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, args...) + logging.println(infoLog, logging.logr, logging.filter, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, format, args...) + logging.printf(infoLog, logging.logr, logging.filter, format, args...) } // InfoS structured logs to the INFO log. @@ -1372,55 +1411,55 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, msg, keysAndValues...) + logging.infoS(logging.logr, logging.filter, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, args...) + logging.print(warningLog, logging.logr, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, depth, args...) + logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) 
} // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, args...) + logging.println(warningLog, logging.logr, logging.filter, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, format, args...) + logging.printf(warningLog, logging.logr, logging.filter, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, args...) + logging.print(errorLog, logging.logr, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, depth, args...) + logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, args...) + logging.println(errorLog, logging.logr, logging.filter, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, format, args...) + logging.printf(errorLog, logging.logr, logging.filter, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1433,34 +1472,34 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, msg, keysAndValues...) + logging.errorS(err, logging.logr, logging.filter, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, args...) + logging.print(fatalLog, logging.logr, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, depth, args...) + logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, args...) + logging.println(fatalLog, logging.logr, logging.filter, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). 
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, format, args...) + logging.printf(fatalLog, logging.logr, logging.filter, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1471,27 +1510,42 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, args...) + logging.print(fatalLog, logging.logr, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, depth, args...) + logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, args...) + logging.println(fatalLog, logging.logr, logging.filter, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, format, args...) + logging.printf(fatalLog, logging.logr, logging.filter, format, args...) +} + +// LogFilter is a collection of functions that can filter all logging calls, +// e.g. for sanitization of arguments and prevent accidental leaking of secrets. 
+type LogFilter interface { + Filter(args []interface{}) []interface{} + FilterF(format string, args []interface{}) (string, []interface{}) + FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) +} + +func SetLogFilter(filter LogFilter) { + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.filter = filter } // ObjectRef references a kubernetes object diff --git a/vendor/modules.txt b/vendor/modules.txt index 108c41af8aee4..a0993a17742ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -16,7 +16,7 @@ cloud.google.com/go/bigtable/internal/option cloud.google.com/go/storage # github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-pipeline-go/pipeline -# github.com/Azure/azure-sdk-for-go v46.4.0+incompatible => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible +# github.com/Azure/azure-sdk-for-go v48.2.0+incompatible => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network github.com/Azure/azure-sdk-for-go/version @@ -27,7 +27,7 @@ github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm # github.com/Azure/go-autorest v14.2.0+incompatible github.com/Azure/go-autorest -# github.com/Azure/go-autorest/autorest v0.11.10 +# github.com/Azure/go-autorest/autorest v0.11.11 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure # github.com/Azure/go-autorest/autorest/adal v0.9.5 @@ -48,6 +48,7 @@ github.com/Masterminds/squirrel github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid # github.com/NYTimes/gziphandler v1.1.1 +## explicit github.com/NYTimes/gziphandler # github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell @@ -61,7 +62,7 @@ github.com/alecthomas/units # github.com/armon/go-metrics v0.3.3 github.com/armon/go-metrics github.com/armon/go-metrics/prometheus -# github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 +# github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef github.com/asaskevich/govalidator # github.com/aws/aws-lambda-go v1.17.0 ## explicit @@ -70,7 +71,7 @@ github.com/aws/aws-lambda-go/lambda github.com/aws/aws-lambda-go/lambda/handlertrace github.com/aws/aws-lambda-go/lambda/messages github.com/aws/aws-lambda-go/lambdacontext -# github.com/aws/aws-sdk-go v1.35.5 +# github.com/aws/aws-sdk-go v1.35.31 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -159,7 +160,7 @@ github.com/coreos/go-systemd/journal github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog -# github.com/cortexproject/cortex v1.4.1-0.20201022071705-85942c5703cf +# github.com/cortexproject/cortex v1.6.0 ## explicit github.com/cortexproject/cortex/pkg/alertmanager github.com/cortexproject/cortex/pkg/alertmanager/alerts @@ -194,6 +195,12 @@ github.com/cortexproject/cortex/pkg/configs/userconfig github.com/cortexproject/cortex/pkg/cortex github.com/cortexproject/cortex/pkg/distributor github.com/cortexproject/cortex/pkg/flusher +github.com/cortexproject/cortex/pkg/frontend +github.com/cortexproject/cortex/pkg/frontend/transport +github.com/cortexproject/cortex/pkg/frontend/v1 +github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb +github.com/cortexproject/cortex/pkg/frontend/v2 +github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb github.com/cortexproject/cortex/pkg/ingester 
github.com/cortexproject/cortex/pkg/ingester/client github.com/cortexproject/cortex/pkg/ingester/index @@ -202,11 +209,12 @@ github.com/cortexproject/cortex/pkg/querier github.com/cortexproject/cortex/pkg/querier/astmapper github.com/cortexproject/cortex/pkg/querier/batch github.com/cortexproject/cortex/pkg/querier/chunkstore -github.com/cortexproject/cortex/pkg/querier/frontend github.com/cortexproject/cortex/pkg/querier/iterators github.com/cortexproject/cortex/pkg/querier/lazyquery github.com/cortexproject/cortex/pkg/querier/queryrange github.com/cortexproject/cortex/pkg/querier/series +github.com/cortexproject/cortex/pkg/querier/stats +github.com/cortexproject/cortex/pkg/querier/worker github.com/cortexproject/cortex/pkg/ring github.com/cortexproject/cortex/pkg/ring/client github.com/cortexproject/cortex/pkg/ring/kv @@ -218,15 +226,23 @@ github.com/cortexproject/cortex/pkg/ruler github.com/cortexproject/cortex/pkg/ruler/rules github.com/cortexproject/cortex/pkg/ruler/rules/local github.com/cortexproject/cortex/pkg/ruler/rules/objectclient -github.com/cortexproject/cortex/pkg/storage/backend/azure -github.com/cortexproject/cortex/pkg/storage/backend/filesystem -github.com/cortexproject/cortex/pkg/storage/backend/gcs -github.com/cortexproject/cortex/pkg/storage/backend/s3 +github.com/cortexproject/cortex/pkg/scheduler +github.com/cortexproject/cortex/pkg/scheduler/queue +github.com/cortexproject/cortex/pkg/scheduler/schedulerpb +github.com/cortexproject/cortex/pkg/storage/bucket +github.com/cortexproject/cortex/pkg/storage/bucket/azure +github.com/cortexproject/cortex/pkg/storage/bucket/filesystem +github.com/cortexproject/cortex/pkg/storage/bucket/gcs +github.com/cortexproject/cortex/pkg/storage/bucket/s3 +github.com/cortexproject/cortex/pkg/storage/bucket/swift github.com/cortexproject/cortex/pkg/storage/tsdb +github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex github.com/cortexproject/cortex/pkg/storegateway github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb +github.com/cortexproject/cortex/pkg/tenant github.com/cortexproject/cortex/pkg/util github.com/cortexproject/cortex/pkg/util/chunkcompat +github.com/cortexproject/cortex/pkg/util/concurrency github.com/cortexproject/cortex/pkg/util/extract github.com/cortexproject/cortex/pkg/util/fakeauth github.com/cortexproject/cortex/pkg/util/flagext @@ -234,9 +250,11 @@ github.com/cortexproject/cortex/pkg/util/grpc github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy github.com/cortexproject/cortex/pkg/util/grpc/healthcheck github.com/cortexproject/cortex/pkg/util/grpcclient +github.com/cortexproject/cortex/pkg/util/grpcutil github.com/cortexproject/cortex/pkg/util/limiter github.com/cortexproject/cortex/pkg/util/middleware github.com/cortexproject/cortex/pkg/util/modules +github.com/cortexproject/cortex/pkg/util/process github.com/cortexproject/cortex/pkg/util/push github.com/cortexproject/cortex/pkg/util/runtimeconfig github.com/cortexproject/cortex/pkg/util/services @@ -252,7 +270,7 @@ github.com/davecgh/go-spew/spew github.com/dgrijalva/jwt-go # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f github.com/dgryski/go-rendezvous -# github.com/digitalocean/godo v1.46.0 +# github.com/digitalocean/godo v1.52.0 github.com/digitalocean/godo # github.com/docker/distribution v2.7.1+incompatible github.com/docker/distribution/digestset @@ -351,7 +369,7 @@ github.com/go-logr/logr # github.com/go-openapi/analysis v0.19.10 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal -# 
github.com/go-openapi/errors v0.19.4 +# github.com/go-openapi/errors v0.19.8 => github.com/go-openapi/errors v0.19.4 github.com/go-openapi/errors # github.com/go-openapi/jsonpointer v0.19.3 github.com/go-openapi/jsonpointer @@ -370,11 +388,11 @@ github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security # github.com/go-openapi/spec v0.19.8 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.19.5 +# github.com/go-openapi/strfmt v0.19.11 github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.19.9 github.com/go-openapi/swag -# github.com/go-openapi/validate v0.19.8 +# github.com/go-openapi/validate v0.19.14 => github.com/go-openapi/validate v0.19.8 github.com/go-openapi/validate # github.com/go-redis/redis/v8 v8.2.3 github.com/go-redis/redis/v8 @@ -417,7 +435,7 @@ github.com/golang-migrate/migrate/v4/source/file # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/groupcache/lru github.com/golang/groupcache/singleflight -# github.com/golang/protobuf v1.4.2 +# github.com/golang/protobuf v1.4.3 github.com/golang/protobuf/descriptor github.com/golang/protobuf/internal/gengogrpc github.com/golang/protobuf/jsonpb @@ -445,7 +463,7 @@ github.com/google/go-cmp/cmp/internal/value github.com/google/go-querystring/query # github.com/google/gofuzz v1.1.0 github.com/google/gofuzz -# github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7 +# github.com/google/pprof v0.0.0-20201117184057-ae444373da19 github.com/google/pprof/profile # github.com/google/uuid v1.1.1 github.com/google/uuid @@ -455,7 +473,7 @@ github.com/googleapis/gax-go/v2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/openapiv2 -# github.com/gophercloud/gophercloud v0.13.0 +# github.com/gophercloud/gophercloud v0.14.0 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips @@ -488,7 +506,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/util/metautils ## explicit github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus -# github.com/grpc-ecosystem/grpc-gateway v1.15.0 +# github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities @@ -569,7 +587,7 @@ github.com/klauspost/compress/gzip github.com/klauspost/cpuid # github.com/konsorten/go-windows-terminal-sequences v1.0.3 github.com/konsorten/go-windows-terminal-sequences -# github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 +# github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 github.com/lann/builder # github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 github.com/lann/ps @@ -592,7 +610,7 @@ github.com/mattn/go-ieproxy github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.31 +# github.com/miekg/dns v1.1.35 github.com/miekg/dns # github.com/minio/md5-simd v1.1.0 github.com/minio/md5-simd @@ -612,7 +630,7 @@ github.com/minio/minio-go/v7/pkg/tags github.com/minio/sha256-simd # github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir -# github.com/mitchellh/mapstructure v1.2.2 +# github.com/mitchellh/mapstructure v1.3.3 ## explicit github.com/mitchellh/mapstructure # github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 @@ -660,7 
+678,7 @@ github.com/pierrec/lz4/v4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939 +# github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054 github.com/prometheus/alertmanager/api github.com/prometheus/alertmanager/api/metrics github.com/prometheus/alertmanager/api/v1 @@ -699,7 +717,7 @@ github.com/prometheus/alertmanager/store github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.7.1 +# github.com/prometheus/client_golang v1.8.0 ## explicit github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 @@ -713,7 +731,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.2.0 ## explicit github.com/prometheus/client_model/go -# github.com/prometheus/common v0.14.0 +# github.com/prometheus/common v0.15.0 ## explicit github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -723,11 +741,11 @@ github.com/prometheus/common/route github.com/prometheus/common/version # github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 github.com/prometheus/node_exporter/https -# github.com/prometheus/procfs v0.1.3 +# github.com/prometheus/procfs v0.2.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20201014093524-73e2ce1bd643 +# github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f ## explicit github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -824,7 +842,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a +# github.com/thanos-io/thanos v0.13.1-0.20201130180807-84afc97e7d58 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader github.com/thanos-io/thanos/pkg/block/metadata @@ -836,6 +854,7 @@ github.com/thanos-io/thanos/pkg/component github.com/thanos-io/thanos/pkg/discovery/cache github.com/thanos-io/thanos/pkg/discovery/dns github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns +github.com/thanos-io/thanos/pkg/errutil github.com/thanos-io/thanos/pkg/extprom github.com/thanos-io/thanos/pkg/gate github.com/thanos-io/thanos/pkg/http @@ -857,7 +876,6 @@ github.com/thanos-io/thanos/pkg/store/hintspb github.com/thanos-io/thanos/pkg/store/labelpb github.com/thanos-io/thanos/pkg/store/storepb github.com/thanos-io/thanos/pkg/store/storepb/prompb -github.com/thanos-io/thanos/pkg/store/storepb/testutil github.com/thanos-io/thanos/pkg/strutil github.com/thanos-io/thanos/pkg/testutil github.com/thanos-io/thanos/pkg/tracing @@ -893,7 +911,7 @@ github.com/uber/jaeger-lib/metrics/prometheus ## explicit # github.com/ugorji/go/codec v1.1.7 github.com/ugorji/go/codec -# github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 +# github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec ## explicit github.com/weaveworks/common/aws github.com/weaveworks/common/errors @@ -993,7 +1011,7 @@ go.etcd.io/etcd/raft/tracker go.etcd.io/etcd/version go.etcd.io/etcd/wal go.etcd.io/etcd/wal/walpb -# go.mongodb.org/mongo-driver v1.3.2 +# go.mongodb.org/mongo-driver v1.4.3 
go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -1065,7 +1083,7 @@ golang.org/x/lint/golint # golang.org/x/mod v0.3.0 golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 +# golang.org/x/net v0.0.0-20201110031124-69a78807bb2b ## explicit golang.org/x/net/bpf golang.org/x/net/context @@ -1085,22 +1103,22 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 +# golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20200930132711-30421366ff76 +# golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71 +# golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.3.3 +# golang.org/x/text v0.3.4 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi @@ -1108,7 +1126,7 @@ golang.org/x/text/unicode/norm golang.org/x/text/width # golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20201008025239-9df69603baec +# golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c golang.org/x/tools/cmd/goimports golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata @@ -1124,7 +1142,7 @@ golang.org/x/tools/internal/imports # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.32.0 +# google.golang.org/api v0.35.0 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 google.golang.org/api/googleapi @@ -1141,6 +1159,7 @@ google.golang.org/api/transport/cert google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation +google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.6 google.golang.org/appengine google.golang.org/appengine/internal @@ -1165,7 +1184,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.32.0 => google.golang.org/grpc v1.29.1 +# google.golang.org/grpc v1.33.1 => google.golang.org/grpc v1.29.1 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -1283,7 +1302,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.19.2 +# k8s.io/api v0.19.4 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 @@ -1325,7 +1344,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.19.2 +# k8s.io/apimachinery v0.19.4 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource @@ -1431,7 +1450,7 @@ k8s.io/client-go/util/workqueue # k8s.io/klog v1.0.0 ## explicit k8s.io/klog -# k8s.io/klog/v2 v2.3.0 +# k8s.io/klog/v2 v2.4.0 k8s.io/klog/v2 # k8s.io/utils 
v0.0.0-20200729134348-d5654de09c73 k8s.io/utils/buffer @@ -1451,3 +1470,5 @@ sigs.k8s.io/yaml # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # google.golang.org/grpc => google.golang.org/grpc v1.29.1 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab +# github.com/go-openapi/errors => github.com/go-openapi/errors v0.19.4 +# github.com/go-openapi/validate => github.com/go-openapi/validate v0.19.8
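
The two trailing comments above are how vendor/modules.txt records the go-openapi pins. For reference, a minimal go.mod sketch with the corresponding replace directives; the module path is a placeholder and nothing outside the two go-openapi lines is taken from this patch:

module example.com/placeholder

replace (
	github.com/go-openapi/errors => github.com/go-openapi/errors v0.19.4
	github.com/go-openapi/validate => github.com/go-openapi/validate v0.19.8
)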
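
The klog.go hunk earlier in this patch introduces a LogFilter hook: every logging entry point is now threaded through logging.filter, and SetLogFilter installs the filter under the logger's mutex. A minimal sketch of how a consumer of the vendored k8s.io/klog/v2 v2.4.0 could use that hook; the redactFilter type and the example key/value pairs are illustrative assumptions, not part of this patch:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// redactFilter is a hypothetical klog.LogFilter that masks the value paired
// with a "password" key in structured log calls and passes everything else
// through unchanged.
type redactFilter struct{}

func (redactFilter) Filter(args []interface{}) []interface{} { return args }

func (redactFilter) FilterF(format string, args []interface{}) (string, []interface{}) {
	return format, args
}

func (redactFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) {
	for i := 0; i+1 < len(keysAndValues); i += 2 {
		if key, ok := keysAndValues[i].(string); ok && key == "password" {
			keysAndValues[i+1] = "[REDACTED]"
		}
	}
	return msg, keysAndValues
}

func main() {
	// Register klog's flags and install the filter before any logging happens.
	klog.InitFlags(nil)
	flag.Parse()

	klog.SetLogFilter(redactFilter{})

	// FilterS rewrites the password value before the line is emitted.
	klog.InfoS("user authenticated", "user", "alice", "password", "hunter2")
	klog.Flush()
}

Because the filter is consulted in print, printf, println, printWithFileLine, infoS and errorS alike, a single SetLogFilter call covers both the printf-style and the structured (InfoS/ErrorS) entry points.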